/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

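/*
 * Reserve a range of I/O virtual addresses from the driver's drm_mm pool
 * and map the object's scatter-gather table into the IOMMU domain at that
 * address.
 */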
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
			   rk_obj->sgt->nents, prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

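/*
 * Undo rockchip_gem_iommu_map(): tear down the IOMMU mapping and return
 * the I/O virtual address range to the pool.
 */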
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

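/*
 * Back the object with shmem pages, build a scatter-gather table for them
 * and flush the CPU caches so the device sees up-to-date contents.
 */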
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

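/*
 * Undo rockchip_gem_get_pages(): free the scatter-gather table and return
 * the pages, marking them dirty and accessed.
 */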
static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

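/*
 * IOMMU-backed allocation: the buffer consists of discontiguous shmem
 * pages that the IOMMU makes contiguous in device address space. A
 * write-combined kernel mapping is created only if alloc_kmap is set.
 */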
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

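/*
 * Contiguous allocation through the DMA mapping API: write-combined, and
 * without a kernel mapping unless alloc_kmap asks for one.
 */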
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer\n", obj->size);
		return -ENOMEM;
	}

	return 0;
}

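/* Pick the IOMMU path when a domain is attached, contiguous DMA otherwise. */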
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}

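/*
 * Page-backed objects are mapped into userspace one page at a time with
 * vm_insert_page(); vma->vm_pgoff selects the starting page, so partial
 * mappings of the buffer are allowed.
 */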
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int i, count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);
	unsigned long uaddr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	unsigned long end = user_count + offset;
	int ret;

	if (user_count == 0)
		return -ENXIO;
	if (end > count)
		return -ENXIO;

	for (i = offset; i < end; i++) {
		ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
		if (ret)
			return ret;
		uaddr += PAGE_SIZE;
	}

	return 0;
}

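/* Contiguous buffers are mapped in one call through the DMA mapping API. */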
static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

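/*
 * Common tail of the two mmap entry points below: install the backing
 * memory into the VMA that drm_gem_mmap_obj()/drm_gem_mmap() prepared.
 */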
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * We allocated a struct page table for rk_obj, so clear the
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

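/*
 * mmap a GEM object for callers that already hold the object: set the VMA
 * up as drm_gem_mmap() would, then install the backing memory.
 */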
int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

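/*
 * Allocate the wrapper object and initialize the embedded GEM object at
 * the page-aligned size; backing memory is attached separately.
 */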
struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	drm_gem_object_init(drm, obj, size);

	return rk_obj;
}

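/*
 * Allocate a GEM object together with its backing memory; alloc_kmap
 * additionally requests a kernel virtual mapping of the buffer.
 */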
struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
				     rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * Returns a struct rockchip_gem_object * on success or an ERR_PTR()
 * value on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle is the ID that userspace uses to refer to the object.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}
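
/*
 * For illustration only (not part of this driver): userspace typically
 * reaches the callback above through the CREATE_DUMB ioctl, along the
 * lines of (width/height/bpp are made-up example values):
 *
 *	struct drm_mode_create_dumb args = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &args);
 *
 * On success the kernel fills in args.handle, args.pitch and args.size.
 */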

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

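/*
 * Return how many bytes, counted from the first entry, the DMA mapping
 * laid out at consecutive device addresses.
 */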
static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
						     int count)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, count, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

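/*
 * An imported buffer need not be physically contiguous; the IOMMU makes it
 * contiguous in device address space instead.
 */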
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

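/*
 * Without an IOMMU the device needs the imported buffer to be physically
 * contiguous: map it for DMA and verify that the resulting addresses form
 * a single linear range covering the whole dma-buf.
 */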
static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
			       DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
			     DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

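/*
 * rockchip_gem_prime_import_sg_table - (struct drm_driver)->gem_prime_import_sg_table
 * callback function
 *
 * Wrap an imported scatter-gather table in a new rockchip GEM object.
 */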
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

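/*
 * Map the buffer into the kernel. IOMMU-backed objects get a fresh
 * write-combined vmap(); contiguous buffers reuse the kernel mapping made
 * at allocation time, if one exists.
 */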
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}