drivers/gpu/drm/msm/msm_gem.c (from commit "drm/msm: basic KMS driver for snapdragon")
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "msm_drv.h"
#include "msm_gem.h"

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->pages) {
                struct drm_device *dev = obj->dev;
                struct page **p = drm_gem_get_pages(obj, 0);
                int npages = obj->size >> PAGE_SHIFT;

                if (IS_ERR(p)) {
                        dev_err(dev->dev, "could not get pages: %ld\n",
                                        PTR_ERR(p));
                        return p;
                }

                /* drm_prime_pages_to_sg() returns an ERR_PTR on failure: */
                msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
                if (IS_ERR(msm_obj->sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt\n");
                        return ERR_CAST(msm_obj->sgt);
                }

                msm_obj->pages = p;

                /* For non-cached buffers, ensure the new pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_map_sg(dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
        }

        return msm_obj->pages;
}

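/* Called with dev->struct_mutex held; undoes get_pages() by unmapping the
 * buffer from the device and releasing the backing pages:
 */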
static void put_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (msm_obj->pages) {
                /* For non-cached buffers, undo the device mapping that
                 * was set up in get_pages():
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
                sg_free_table(msm_obj->sgt);
                kfree(msm_obj->sgt);

                drm_gem_put_pages(obj, msm_obj->pages, true, false);
                msm_obj->pages = NULL;
        }
}

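/* Set up the userspace mapping, choosing the page protection based on the
 * object's caching flags (write-combined, uncached, or cached):
 */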
int msm_gem_mmap_obj(struct drm_gem_object *obj,
                struct vm_area_struct *vma)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        if (msm_obj->flags & MSM_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        } else if (msm_obj->flags & MSM_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
                get_file(obj->filp);
                vma->vm_pgoff = 0;
                vma->vm_file = obj->filp;

                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        }

        return 0;
}

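/* drm_gem_mmap() does the generic GEM setup (object lookup, vm_ops, etc);
 * we then fix up the mapping to match the object's caching flags:
 */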
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

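/* Fault handler: make sure the backing pages are attached, then insert the
 * faulting page into the userspace mapping:
 */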
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct drm_device *dev = obj->dev;
        struct page **pages;
        unsigned long pfn;
        pgoff_t pgoff;
        int ret;

        /* Make sure we don't race a parallel update during the fault, and
         * that nothing is moved or removed from beneath our feet:
         */
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        /* make sure we have pages attached now */
        pages = get_pages(obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out_unlock;
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;

        pfn = page_to_pfn(msm_obj->pages[pgoff]);

        VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
                        pfn, pfn << PAGE_SHIFT);

        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
        mutex_unlock(&dev->struct_mutex);
out:
        switch (ret) {
        case -EAGAIN:
                set_need_resched();
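                /* fall through */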
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret) {
                dev_err(dev->dev, "could not allocate mmap offset\n");
                return 0;
        }

        return drm_vma_node_offset_addr(&obj->vma_node);
}

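/* Like mmap_offset(), but takes struct_mutex itself: */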
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
        uint64_t offset;

        mutex_lock(&obj->dev->struct_mutex);
        offset = mmap_offset(obj);
        mutex_unlock(&obj->dev->struct_mutex);
        return offset;
}

/* helpers for dealing w/ iommu: */
static int map_range(struct iommu_domain *domain, unsigned int iova,
                struct sg_table *sgt, unsigned int len, int prot)
{
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i, j;
        int ret;

        if (!domain || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_phys(sg) - sg->offset;
                size_t bytes = sg->length + sg->offset;

                VERB("map[%d]: %08x %08x(%zx)", i, da, pa, bytes);

                ret = iommu_map(domain, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        /* unwind the partial mapping: */
        da = iova;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg->length + sg->offset;
                iommu_unmap(domain, da, bytes);
                da += bytes;
        }
        return ret;
}

static void unmap_range(struct iommu_domain *domain, unsigned int iova,
                struct sg_table *sgt, unsigned int len)
{
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg->length + sg->offset;
                size_t unmapped;

                unmapped = iommu_unmap(domain, da, bytes);
                if (unmapped < bytes)
                        break;

                VERB("unmap[%d]: %08x(%zx)", i, da, bytes);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                da += bytes;
        }
}

/* Should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means that once support for unpinning is eventually added,
 * the refcnt counter will need to become an atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                uint32_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;

        if (!msm_obj->domain[id].iova) {
                struct msm_drm_private *priv = obj->dev->dev_private;
                /* for now, the mmap offset doubles as the iova: */
                uint32_t offset = (uint32_t)mmap_offset(obj);
                struct page **pages = get_pages(obj);

                if (IS_ERR(pages))
                        return PTR_ERR(pages);

                /* XXX ideally we would not map buffers writable when not
                 * needed...
                 */
                ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
                                obj->size, IOMMU_READ | IOMMU_WRITE);
                if (!ret)
                        msm_obj->domain[id].iova = offset;
        }

        if (!ret)
                *iova = msm_obj->domain[id].iova;

        return ret;
}

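/* Convenience wrapper around msm_gem_get_iova_locked() that takes
 * struct_mutex itself:
 */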
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
        int ret;

        mutex_lock(&obj->dev->struct_mutex);
        ret = msm_gem_get_iova_locked(obj, id, iova);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
        /* XXX TODO ..
         * NOTE: probably don't need a _locked() version.. we wouldn't
         * normally unmap here, but instead just mark that it could be
         * unmapped (if the iova refcnt drops to zero), but then later
         * if another _get_iova_locked() fails we can start unmapping
         * things that are no longer needed..
         */
}

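/* Create a "dumb" scanout-capable buffer object and a userspace handle
 * referencing it:
 */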
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args)
{
        args->pitch = align_pitch(args->width, args->bpp);
        args->size = PAGE_ALIGN(args->pitch * args->height);
        return msm_gem_new_handle(dev, file, args->size,
                        MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
                uint32_t handle)
{
        /* No special work needed, drop the reference and see what falls out */
        return drm_gem_handle_delete(file, handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        /* GEM does all our handle to object mapping */
        obj = drm_gem_object_lookup(dev, file, handle);
        if (obj == NULL) {
                ret = -ENOENT;
                goto out;
        }

        *offset = msm_gem_mmap_offset(obj);

        drm_gem_object_unreference_unlocked(obj);

out:
        return ret;
}

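/* Map the buffer into kernel address space (lazily, on first call); the
 * caller must hold struct_mutex:
 */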
void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

        if (!msm_obj->vaddr) {
                struct page **pages = get_pages(obj);

                if (IS_ERR(pages))
                        return ERR_CAST(pages);
                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
        }
        return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
        void *ret;

        mutex_lock(&obj->dev->struct_mutex);
        ret = msm_gem_vaddr_locked(obj);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
}

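/* Queue work that should run once the buffer is no longer active: */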
int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
                struct work_struct *work)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;

        /* Just a placeholder until we have a GPU: with no rendering to
         * wait on, the work can be queued immediately.
         */
        queue_work(priv->wq, work);

        return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        uint64_t off = drm_vma_node_start(&obj->vma_node);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
        seq_printf(m, "%08x: %2d (%2d) %08llx %p %zu\n",
                        msm_obj->flags, obj->name, obj->refcount.refcount.counter,
                        off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
        struct msm_gem_object *msm_obj;
        int count = 0;
        size_t size = 0;

        list_for_each_entry(msm_obj, list, mm_list) {
                struct drm_gem_object *obj = &msm_obj->base;
                seq_puts(m, " ");
                msm_gem_describe(obj, m);
                count++;
                size += obj->size;
        }

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

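/* Called with struct_mutex held; tears down the iommu mappings, the kernel
 * mapping, and the backing pages before freeing the object itself:
 */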
void msm_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int id;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        list_del(&msm_obj->mm_list);

        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
                if (msm_obj->domain[id].iova) {
                        struct msm_drm_private *priv = obj->dev->dev_private;
                        unmap_range(priv->iommus[id], msm_obj->domain[id].iova,
                                        msm_obj->sgt, obj->size);
                }
        }

        drm_gem_free_mmap_offset(obj);

        if (msm_obj->vaddr)
                vunmap(msm_obj->vaddr);

        put_pages(obj);

        drm_gem_object_release(obj);

        kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                uint32_t size, uint32_t flags, uint32_t *handle)
{
        struct drm_gem_object *obj;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        obj = msm_gem_new(dev, size, flags);

        mutex_unlock(&dev->struct_mutex);

        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(obj);

        return ret;
}

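/* Allocate and initialize a new GEM buffer object; the caller must hold
 * struct_mutex:
 */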
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
        struct drm_gem_object *obj = NULL;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        size = PAGE_ALIGN(size);

        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
        case MSM_BO_CACHED:
        case MSM_BO_WC:
                break;
        default:
                dev_err(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
                ret = -EINVAL;
                goto fail;
        }

        msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
        if (!msm_obj) {
                ret = -ENOMEM;
                goto fail;
        }

        obj = &msm_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto fail;

        msm_obj->flags = flags;

        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

        return obj;

fail:
        /* struct_mutex is held here, so use the locked unreference: */
        if (obj)
                drm_gem_object_unreference(obj);

        return ERR_PTR(ret);
}