// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */
#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put_unlocked(&obj->gem);
}
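/*
 * Pinning for job submission needs no extra work here: buffers are mapped
 * for the device when they are allocated or imported, so pin only reports
 * the existing scatter-gather table and device address, and unpin is a
 * no-op.
 */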
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	*sgt = obj->sgt;

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}
static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr + page * PAGE_SIZE;
	else if (obj->gem.import_attach)
		return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
	else
		return vmap(obj->pages + page, 1, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}
static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
	else
		vunmap(addr);
}
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}
static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};
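/*
 * Reserve an IOVA range from the tegra->mm allocator and map the buffer's
 * scatter-gather list into the shared IOMMU domain at that address. Both
 * the allocator and the domain are shared by all clients, hence mm_lock.
 */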
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->paddr = bo->mm->start;

	bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}
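/*
 * Common object setup: allocate the tegra_bo wrapper, initialize the
 * host1x bookkeeping and the GEM object, and create the fake mmap offset
 * that userspace passes to mmap().
 */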
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
	}
}
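/*
 * Back the buffer with discontiguous shmem pages and build a device-mapped
 * scatter-gather table for them. This path is only taken when an IOMMU is
 * available to make the pages appear contiguous to the hardware.
 */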
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}
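/*
 * Two allocation strategies: with an IOMMU, discontiguous shmem pages are
 * stitched into a contiguous IOVA range; without one, fall back to
 * physically contiguous write-combined DMA memory.
 */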
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put_unlocked(&bo->gem);

	return bo;
}
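/*
 * Wrap a foreign dma-buf in a tegra_bo. With an IOMMU, the imported
 * scatter-gather list is remapped into the domain; without one, only
 * buffers that are already contiguous (a single sg entry) are accepted.
 */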
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}
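/*
 * Dumb buffers for KMS: compute the pitch from width and bpp, round it up
 * to the driver's pitch alignment and size the allocation accordingly.
 */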
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}
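/*
 * Fault handler for mmap()ed buffers. Only shmem-backed buffers are
 * populated lazily, page by page; contiguous buffers are mapped up front
 * by __tegra_gem_mmap() and should never fault.
 */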
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}
const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
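/*
 * Contiguous buffers are mapped in one go with dma_mmap_wc(), whereas
 * shmem-backed buffers only get their VMA flags and write-combined page
 * protection set up here and rely on tegra_bo_fault() to fill in pages.
 */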
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}
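/*
 * dma-buf exporter side: build a scatter-gather table for the importing
 * device, with one entry per shmem page or a single entry spanning the
 * whole contiguous buffer.
 */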
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}
static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}
static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}
static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				    DMA_FROM_DEVICE);

	return 0;
}
static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				       DMA_TO_DEVICE);

	return 0;
}
static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}
static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.map = tegra_gem_prime_kmap,
	.unmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};
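/* Export a GEM object as a dma-buf backed by the ops table above. */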
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}
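/*
 * Importing a dma-buf that this driver exported itself on the same device
 * short-circuits to the underlying GEM object; anything else goes through
 * a full import.
 */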
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}