/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

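/* A buffer object is backed either by shmem pages (the normal case) or,
 * when no IOMMU is available (or for MSM_BO_STOLEN objects), by a
 * physically contiguous "VRAM" carveout managed with drm_mm.  The
 * helpers below tell the two cases apart and resolve a carveout
 * object's physical address.
 */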
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;
        return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
                        priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
                int npages)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;
        dma_addr_t paddr;
        struct page **p;
        int ret, i;

        p = drm_malloc_ab(npages, sizeof(struct page *));
        if (!p)
                return ERR_PTR(-ENOMEM);

        ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
        if (ret) {
                drm_free_large(p);
                return ERR_PTR(ret);
        }

        paddr = physaddr(obj);
        for (i = 0; i < npages; i++) {
                p[i] = phys_to_page(paddr);
                paddr += PAGE_SIZE;
        }

        return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->pages) {
                struct drm_device *dev = obj->dev;
                struct page **p;
                int npages = obj->size >> PAGE_SHIFT;

                if (use_pages(obj))
                        p = drm_gem_get_pages(obj);
                else
                        p = get_pages_vram(obj, npages);

                if (IS_ERR(p)) {
                        dev_err(dev->dev, "could not get pages: %ld\n",
                                        PTR_ERR(p));
                        return p;
                }

                msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
                if (IS_ERR(msm_obj->sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt\n");
                        return ERR_CAST(msm_obj->sgt);
                }

                msm_obj->pages = p;

                /* For non-cached buffers, ensure the new pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_map_sg(dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
        }

        return msm_obj->pages;
}

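/* Undo get_pages(): unmap the sg table from the device, free it, and
 * release the backing pages (back to shmem, or back to the VRAM
 * carveout's drm_mm).
 */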
static void put_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (msm_obj->pages) {
                /* For non-cached buffers, undo the device mapping that
                 * was set up in get_pages():
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
                sg_free_table(msm_obj->sgt);
                kfree(msm_obj->sgt);

                if (use_pages(obj))
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
                else {
                        drm_mm_remove_node(msm_obj->vram_node);
                        drm_free_large(msm_obj->pages);
                }

                msm_obj->pages = NULL;
        }
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct page **p;
        mutex_lock(&dev->struct_mutex);
        p = get_pages(obj);
        mutex_unlock(&dev->struct_mutex);
        return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
        /* when we start tracking the pin count, then do something here */
}

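/* Set up a userspace vma for the object, honoring its caching flags:
 * WC and uncached objects get pfn mappings with the matching pgprot,
 * while cached objects are redirected to the shmem file itself.
 */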
int msm_gem_mmap_obj(struct drm_gem_object *obj,
                struct vm_area_struct *vma)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        if (msm_obj->flags & MSM_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        } else if (msm_obj->flags & MSM_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
                get_file(obj->filp);
                vma->vm_pgoff = 0;
                vma->vm_file = obj->filp;

                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        }

        return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

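/* Page fault handler for userspace mappings: make sure the backing
 * pages are attached, then insert the faulting page's pfn into the
 * vma.
 */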
int msm_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct page **pages;
        unsigned long pfn;
        pgoff_t pgoff;
        int ret;

        /* This should only happen if userspace tries to pass a mmap'd
         * but unfaulted gem bo vaddr into submit ioctl, triggering
         * a page fault while struct_mutex is already held.  This is
         * not a valid use-case so just bail.
         */
        if (priv->struct_mutex_task == current)
                return VM_FAULT_SIGBUS;

        /* Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet
         */
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        /* make sure we have pages attached now */
        pages = get_pages(obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out_unlock;
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        pfn = page_to_pfn(pages[pgoff]);

        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
                        pfn, pfn << PAGE_SHIFT);

        ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
        mutex_unlock(&dev->struct_mutex);
out:
        switch (ret) {
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                /*
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);

        if (ret) {
                dev_err(dev->dev, "could not allocate mmap offset\n");
                return 0;
        }

        return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
        uint64_t offset;
        mutex_lock(&obj->dev->struct_mutex);
        offset = mmap_offset(obj);
        mutex_unlock(&obj->dev->struct_mutex);
        return offset;
}

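/* Tear down all per-address-space mappings of the object; called when
 * the object is purged or freed.
 */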
static void
put_iova(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = obj->dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int id;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
                if (!priv->aspace[id])
                        continue;
                msm_gem_unmap_vma(priv->aspace[id],
                                &msm_obj->domain[id], msm_obj->sgt);
        }
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                uint64_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;

        if (!msm_obj->domain[id].iova) {
                struct msm_drm_private *priv = obj->dev->dev_private;
                struct page **pages = get_pages(obj);

                if (IS_ERR(pages))
                        return PTR_ERR(pages);

                if (iommu_present(&platform_bus_type)) {
                        ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
                                        msm_obj->sgt, obj->size >> PAGE_SHIFT);
                } else {
                        msm_obj->domain[id].iova = physaddr(obj);
                }
        }

        if (!ret)
                *iova = msm_obj->domain[id].iova;

        return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret;

        /* this is safe right now because we don't unmap until the
         * bo is deleted:
         */
        if (msm_obj->domain[id].iova) {
                *iova = msm_obj->domain[id].iova;
                return 0;
        }

        mutex_lock(&obj->dev->struct_mutex);
        ret = msm_gem_get_iova_locked(obj, id, iova);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(!msm_obj->domain[id].iova);
        return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
        // XXX TODO ..
        // NOTE: probably don't need a _locked() version.. we wouldn't
        // normally unmap here, but instead just mark that it could be
        // unmapped (if the iova refcnt drops to zero), but then later
        // if another _get_iova_locked() fails we can start unmapping
        // things that are no longer needed..
}

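/* An illustrative sketch (not code from this file) of how the iova
 * helpers above are meant to be paired, where 'id' is whichever
 * address space index the caller is working with:
 *
 *      uint64_t iova;
 *      int ret = msm_gem_get_iova(obj, id, &iova);
 *      if (ret)
 *              return ret;
 *      ... hand 'iova' to the hardware ...
 *      msm_gem_put_iova(obj, id);   // currently a no-op, see above
 *
 * Since mappings are not torn down until the object is deleted, a
 * missing put is harmless today, but the pairing keeps callers correct
 * if real refcounting is added later.
 */
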
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args)
{
        args->pitch = align_pitch(args->width, args->bpp);
        args->size = PAGE_ALIGN(args->pitch * args->height);
        return msm_gem_new_handle(dev, file, args->size,
                        MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        /* GEM does all our handle to object mapping */
        obj = drm_gem_object_lookup(file, handle);
        if (obj == NULL) {
                ret = -ENOENT;
                goto fail;
        }

        *offset = msm_gem_mmap_offset(obj);

        drm_gem_object_unreference_unlocked(obj);

fail:
        return ret;
}

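/* Map the object into the kernel's address space (lazily vmap'd,
 * write-combined).  Each msm_gem_get_vaddr() should be balanced by a
 * msm_gem_put_vaddr(); the mapping itself is only torn down later, by
 * msm_gem_vunmap().
 */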
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
        if (!msm_obj->vaddr) {
                struct page **pages = get_pages(obj);
                if (IS_ERR(pages))
                        return ERR_CAST(pages);
                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
                if (msm_obj->vaddr == NULL)
                        return ERR_PTR(-ENOMEM);
        }
        msm_obj->vmap_count++;
        return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
        void *ret;
        mutex_lock(&obj->dev->struct_mutex);
        ret = msm_gem_get_vaddr_locked(obj);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
        WARN_ON(msm_obj->vmap_count < 1);
        msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
        mutex_lock(&obj->dev->struct_mutex);
        msm_gem_put_vaddr_locked(obj);
        mutex_unlock(&obj->dev->struct_mutex);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

        if (msm_obj->madv != __MSM_MADV_PURGED)
                msm_obj->madv = madv;

        return (msm_obj->madv != __MSM_MADV_PURGED);
}

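/* Drop the backing store of a purgeable (MSM_MADV_DONTNEED) object so
 * its pages can be reclaimed, e.g. when the shrinker runs under memory
 * pressure.  The object itself stays around until its last reference
 * is dropped.
 */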
void msm_gem_purge(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
        WARN_ON(!is_purgeable(msm_obj));
        WARN_ON(obj->import_attach);

        put_iova(obj);

        msm_gem_vunmap(obj);

        put_pages(obj);

        msm_obj->madv = __MSM_MADV_PURGED;

        drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
        drm_gem_free_mmap_offset(obj);

        /* Our goal here is to return as much of the memory as
         * is possible back to the system as we are called from OOM.
         * To do this we must instruct the shmfs to drop all of its
         * backing pages, *now*.
         */
        shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

        invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
                        0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
                return;

        vunmap(msm_obj->vaddr);
        msm_obj->vaddr = NULL;
}

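/* Wait for fences owned by other contexts before the object is used by
 * a new submit: an exclusive (write) user waits for all shared fences,
 * a shared (read) user waits only for the exclusive fence (and also
 * reserves a slot for the shared fence it will add later).
 */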
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
                struct msm_fence_context *fctx, bool exclusive)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct reservation_object_list *fobj;
        struct dma_fence *fence;
        int i, ret;

        if (!exclusive) {
                /* NOTE: _reserve_shared() must happen before _add_shared_fence(),
                 * which makes this a slightly strange place to call it.  OTOH this
                 * is a convenient can-fail point to hook it in.  (And similar to
                 * how etnaviv and nouveau handle this.)
                 */
                ret = reservation_object_reserve_shared(msm_obj->resv);
                if (ret)
                        return ret;
        }

        fobj = reservation_object_get_list(msm_obj->resv);
        if (!fobj || (fobj->shared_count == 0)) {
                fence = reservation_object_get_excl(msm_obj->resv);
                /* don't need to wait on our own fences, since ring is fifo */
                if (fence && (fence->context != fctx->context)) {
                        ret = dma_fence_wait(fence, true);
                        if (ret)
                                return ret;
                }
        }

        if (!exclusive || !fobj)
                return 0;

        for (i = 0; i < fobj->shared_count; i++) {
                fence = rcu_dereference_protected(fobj->shared[i],
                                                reservation_object_held(msm_obj->resv));
                if (fence->context != fctx->context) {
                        ret = dma_fence_wait(fence, true);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

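/* Track which buffers the GPU is currently using: attach the submit's
 * fence to the object's reservation and move the object between the
 * gpu's active_list and the global inactive_list.
 */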
void msm_gem_move_to_active(struct drm_gem_object *obj,
                struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
        msm_obj->gpu = gpu;
        if (exclusive)
                reservation_object_add_excl_fence(msm_obj->resv, fence);
        else
                reservation_object_add_shared_fence(msm_obj->resv, fence);
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        msm_obj->gpu = NULL;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

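/* Prepare for CPU access (the MSM_GEM_CPU_PREP ioctl): wait for the
 * GPU to be done with the buffer, or just poll the fences when
 * MSM_PREP_NOSYNC is set.
 */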
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        bool write = !!(op & MSM_PREP_WRITE);
        unsigned long remain =
                op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
        long ret;

        ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
                        true, remain);
        if (ret == 0)
                return remain == 0 ? -EBUSY : -ETIMEDOUT;
        else if (ret < 0)
                return ret;

        /* TODO cache maintenance */

        return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
        /* TODO cache maintenance */
        return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
                struct seq_file *m)
{
        if (!dma_fence_is_signaled(fence))
                seq_printf(m, "\t%9s: %s %s seq %u\n", type,
                                fence->ops->get_driver_name(fence),
                                fence->ops->get_timeline_name(fence),
                                fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct reservation_object *robj = msm_obj->resv;
        struct reservation_object_list *fobj;
        struct msm_drm_private *priv = obj->dev->dev_private;
        struct dma_fence *fence;
        uint64_t off = drm_vma_node_start(&obj->vma_node);
        const char *madv;
        unsigned id;

        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

        switch (msm_obj->madv) {
        case __MSM_MADV_PURGED:
                madv = " purged";
                break;
        case MSM_MADV_DONTNEED:
                madv = " purgeable";
                break;
        case MSM_MADV_WILLNEED:
        default:
                madv = "";
                break;
        }

        seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
                        obj->name, kref_read(&obj->refcount),
                        off, msm_obj->vaddr);

        for (id = 0; id < priv->num_aspaces; id++)
                seq_printf(m, " %08llx", msm_obj->domain[id].iova);

        seq_printf(m, " %zu%s\n", obj->size, madv);

        rcu_read_lock();
        fobj = rcu_dereference(robj->fence);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        describe_fence(fence, "Shared", m);
                }
        }

        fence = rcu_dereference(robj->fence_excl);
        if (fence)
                describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
        struct msm_gem_object *msm_obj;
        int count = 0;
        size_t size = 0;

        list_for_each_entry(msm_obj, list, mm_list) {
                struct drm_gem_object *obj = &msm_obj->base;
                seq_printf(m, " ");
                msm_gem_describe(obj, m);
                count++;
                size += obj->size;
        }

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

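/* Called when the last reference to the object is dropped: release the
 * iommu mappings, the kernel mapping and the backing pages (or, for an
 * imported dma-buf, just the bookkeeping allocated around it).
 */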
void msm_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* object should not be on active list: */
        WARN_ON(is_active(msm_obj));

        list_del(&msm_obj->mm_list);

        put_iova(obj);

        if (obj->import_attach) {
                if (msm_obj->vaddr)
                        dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

                /* Don't drop the pages for imported dmabuf, as they are not
                 * ours, just free the array we allocated:
                 */
                if (msm_obj->pages)
                        drm_free_large(msm_obj->pages);

                drm_prime_gem_destroy(obj, msm_obj->sgt);
        } else {
                msm_gem_vunmap(obj);
                put_pages(obj);
        }

        if (msm_obj->resv == &msm_obj->_resv)
                reservation_object_fini(msm_obj->resv);

        drm_gem_object_release(obj);

        kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                uint32_t size, uint32_t flags, uint32_t *handle)
{
        struct drm_gem_object *obj;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        obj = msm_gem_new(dev, size, flags);

        mutex_unlock(&dev->struct_mutex);

        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(obj);

        return ret;
}

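/* Common allocation bookkeeping shared by msm_gem_new() and
 * msm_gem_import(): validate the cache flags, set up madvise and
 * reservation state, and put the new object on the inactive list.
 * The caller still initializes the GEM object itself (shmem-backed
 * or private).
 */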
static int msm_gem_new_impl(struct drm_device *dev,
                uint32_t size, uint32_t flags,
                struct reservation_object *resv,
                struct drm_gem_object **obj)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
        bool use_vram = false;

        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
        case MSM_BO_CACHED:
        case MSM_BO_WC:
                break;
        default:
                dev_err(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
                return -EINVAL;
        }

        if (!iommu_present(&platform_bus_type))
                use_vram = true;
        else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
                use_vram = true;

        if (WARN_ON(use_vram && !priv->vram.size))
                return -EINVAL;

        msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
        if (!msm_obj)
                return -ENOMEM;

        if (use_vram)
                msm_obj->vram_node = &msm_obj->domain[0].node;

        msm_obj->flags = flags;
        msm_obj->madv = MSM_MADV_WILLNEED;

        if (resv) {
                msm_obj->resv = resv;
        } else {
                msm_obj->resv = &msm_obj->_resv;
                reservation_object_init(msm_obj->resv);
        }

        INIT_LIST_HEAD(&msm_obj->submit_entry);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

        *obj = &msm_obj->base;

        return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags)
{
        struct drm_gem_object *obj = NULL;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        size = PAGE_ALIGN(size);

        ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
        if (ret)
                goto fail;

        if (use_pages(obj)) {
                ret = drm_gem_object_init(dev, obj, size);
                if (ret)
                        goto fail;
        } else {
                drm_gem_private_object_init(dev, obj, size);
        }

        return obj;

fail:
        drm_gem_object_unreference(obj);
        return ERR_PTR(ret);
}

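/* Import a dma-buf: wrap the already-populated sg_table in a private
 * GEM object and build the page array from it.  The exporter's
 * reservation object (dmabuf->resv) is reused so fences are shared
 * with the exporting driver.
 */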
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                struct dma_buf *dmabuf, struct sg_table *sgt)
{
        struct msm_gem_object *msm_obj;
        struct drm_gem_object *obj = NULL;
        uint32_t size;
        int ret, npages;

        /* if we don't have IOMMU, don't bother pretending we can import: */
        if (!iommu_present(&platform_bus_type)) {
                dev_err(dev->dev, "cannot import without IOMMU\n");
                return ERR_PTR(-EINVAL);
        }

        size = PAGE_ALIGN(dmabuf->size);

        ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
        if (ret)
                goto fail;

        drm_gem_private_object_init(dev, obj, size);

        npages = size / PAGE_SIZE;

        msm_obj = to_msm_bo(obj);
        msm_obj->sgt = sgt;
        msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (!msm_obj->pages) {
                ret = -ENOMEM;
                goto fail;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
        if (ret)
                goto fail;

        return obj;

fail:
        drm_gem_object_unreference_unlocked(obj);
        return ERR_PTR(ret);
}