]>
Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
de2ba664 AM |
2 | /* |
3 | * NVIDIA Tegra DRM GEM helper functions | |
4 | * | |
5 | * Copyright (C) 2012 Sascha Hauer, Pengutronix | |
7ecada3c | 6 | * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved. |
de2ba664 AM |
7 | * |
8 | * Based on the GEM/CMA helpers | |
9 | * | |
10 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | |
de2ba664 AM |
11 | */ |
12 | ||
3800391d | 13 | #include <linux/dma-buf.h> |
df06b759 | 14 | #include <linux/iommu.h> |
773af77f TR |
15 | #include <drm/tegra_drm.h> |
16 | ||
d1f3e1e0 | 17 | #include "drm.h" |
de2ba664 AM |
18 | #include "gem.h" |
19 | ||
de2ba664 AM |
20 | static void tegra_bo_put(struct host1x_bo *bo) |
21 | { | |
3be82743 | 22 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); |
de2ba664 | 23 | |
7664b2fa | 24 | drm_gem_object_put_unlocked(&obj->gem); |
de2ba664 AM |
25 | } |
26 | ||
27 | static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt) | |
28 | { | |
3be82743 | 29 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); |
de2ba664 | 30 | |
585ee0f2 MP |
31 | *sgt = obj->sgt; |
32 | ||
de2ba664 AM |
33 | return obj->paddr; |
34 | } | |
35 | ||
/*
 * host1x .unpin: intentionally a no-op — tegra_bo_pin() takes no
 * resources that would need releasing here.
 */
static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}
39 | ||
40 | static void *tegra_bo_mmap(struct host1x_bo *bo) | |
41 | { | |
3be82743 | 42 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); |
de2ba664 | 43 | |
7ecada3c AM |
44 | if (obj->vaddr) |
45 | return obj->vaddr; | |
46 | else if (obj->gem.import_attach) | |
47 | return dma_buf_vmap(obj->gem.import_attach->dmabuf); | |
48 | else | |
49 | return vmap(obj->pages, obj->num_pages, VM_MAP, | |
50 | pgprot_writecombine(PAGE_KERNEL)); | |
de2ba664 AM |
51 | } |
52 | ||
53 | static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) | |
54 | { | |
7ecada3c AM |
55 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); |
56 | ||
57 | if (obj->vaddr) | |
58 | return; | |
59 | else if (obj->gem.import_attach) | |
60 | dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr); | |
61 | else | |
62 | vunmap(addr); | |
de2ba664 AM |
63 | } |
64 | ||
65 | static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page) | |
66 | { | |
3be82743 | 67 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); |
de2ba664 | 68 | |
7ecada3c AM |
69 | if (obj->vaddr) |
70 | return obj->vaddr + page * PAGE_SIZE; | |
71 | else if (obj->gem.import_attach) | |
72 | return dma_buf_kmap(obj->gem.import_attach->dmabuf, page); | |
73 | else | |
74 | return vmap(obj->pages + page, 1, VM_MAP, | |
75 | pgprot_writecombine(PAGE_KERNEL)); | |
de2ba664 AM |
76 | } |
77 | ||
78 | static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page, | |
79 | void *addr) | |
80 | { | |
7ecada3c AM |
81 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); |
82 | ||
83 | if (obj->vaddr) | |
84 | return; | |
85 | else if (obj->gem.import_attach) | |
86 | dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr); | |
87 | else | |
88 | vunmap(addr); | |
de2ba664 AM |
89 | } |
90 | ||
91 | static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo) | |
92 | { | |
3be82743 | 93 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); |
de2ba664 | 94 | |
7664b2fa | 95 | drm_gem_object_get(&obj->gem); |
de2ba664 AM |
96 | |
97 | return bo; | |
98 | } | |
99 | ||
/* host1x buffer-object operations backed by Tegra GEM objects */
static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};
110 | ||
df06b759 TR |
111 | static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) |
112 | { | |
113 | int prot = IOMMU_READ | IOMMU_WRITE; | |
04184b1f | 114 | int err; |
df06b759 TR |
115 | |
116 | if (bo->mm) | |
117 | return -EBUSY; | |
118 | ||
119 | bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL); | |
120 | if (!bo->mm) | |
121 | return -ENOMEM; | |
122 | ||
347ad49d TR |
123 | mutex_lock(&tegra->mm_lock); |
124 | ||
4e64e553 CW |
125 | err = drm_mm_insert_node_generic(&tegra->mm, |
126 | bo->mm, bo->gem.size, PAGE_SIZE, 0, 0); | |
df06b759 | 127 | if (err < 0) { |
04184b1f | 128 | dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n", |
df06b759 | 129 | err); |
347ad49d | 130 | goto unlock; |
df06b759 TR |
131 | } |
132 | ||
133 | bo->paddr = bo->mm->start; | |
134 | ||
04184b1f DO |
135 | bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl, |
136 | bo->sgt->nents, prot); | |
137 | if (!bo->size) { | |
138 | dev_err(tegra->drm->dev, "failed to map buffer\n"); | |
139 | err = -ENOMEM; | |
df06b759 TR |
140 | goto remove; |
141 | } | |
142 | ||
347ad49d TR |
143 | mutex_unlock(&tegra->mm_lock); |
144 | ||
df06b759 TR |
145 | return 0; |
146 | ||
147 | remove: | |
148 | drm_mm_remove_node(bo->mm); | |
347ad49d TR |
149 | unlock: |
150 | mutex_unlock(&tegra->mm_lock); | |
df06b759 TR |
151 | kfree(bo->mm); |
152 | return err; | |
153 | } | |
154 | ||
155 | static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo) | |
156 | { | |
157 | if (!bo->mm) | |
158 | return 0; | |
159 | ||
347ad49d | 160 | mutex_lock(&tegra->mm_lock); |
df06b759 TR |
161 | iommu_unmap(tegra->domain, bo->paddr, bo->size); |
162 | drm_mm_remove_node(bo->mm); | |
347ad49d TR |
163 | mutex_unlock(&tegra->mm_lock); |
164 | ||
df06b759 TR |
165 | kfree(bo->mm); |
166 | ||
167 | return 0; | |
168 | } | |
169 | ||
c28d4a31 TR |
170 | static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm, |
171 | size_t size) | |
172 | { | |
173 | struct tegra_bo *bo; | |
174 | int err; | |
175 | ||
176 | bo = kzalloc(sizeof(*bo), GFP_KERNEL); | |
177 | if (!bo) | |
178 | return ERR_PTR(-ENOMEM); | |
179 | ||
180 | host1x_bo_init(&bo->base, &tegra_bo_ops); | |
181 | size = round_up(size, PAGE_SIZE); | |
182 | ||
183 | err = drm_gem_object_init(drm, &bo->gem, size); | |
184 | if (err < 0) | |
185 | goto free; | |
186 | ||
187 | err = drm_gem_create_mmap_offset(&bo->gem); | |
188 | if (err < 0) | |
189 | goto release; | |
190 | ||
191 | return bo; | |
192 | ||
193 | release: | |
194 | drm_gem_object_release(&bo->gem); | |
195 | free: | |
196 | kfree(bo); | |
197 | return ERR_PTR(err); | |
198 | } | |
199 | ||
/*
 * Release a natively allocated buffer's backing storage: either shmem
 * pages with their DMA mapping (IOMMU case) or a contiguous
 * write-combined DMA allocation. Imported buffers never reach this
 * function (see tegra_bo_free_object()).
 */
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
	}
}
212 | ||
/*
 * Back a buffer object with shmem pages and DMA-map them via a
 * scatter-gather table. Used when an IOMMU is available, so the pages
 * need not be physically contiguous. Returns 0 or a negative errno.
 */
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	/* gem.size is page-aligned (see tegra_bo_alloc_object()) */
	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	/* dma_map_sg() returns the number of mapped entries, 0 on failure */
	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
	/* NOTE(review): bo->sgt is left dangling on this path; callers
	 * free the whole bo on error, but confirm nothing reuses it. */
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}
245 | ||
73c42c79 | 246 | static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) |
df06b759 TR |
247 | { |
248 | struct tegra_drm *tegra = drm->dev_private; | |
249 | int err; | |
250 | ||
251 | if (tegra->domain) { | |
73c42c79 | 252 | err = tegra_bo_get_pages(drm, bo); |
df06b759 TR |
253 | if (err < 0) |
254 | return err; | |
255 | ||
256 | err = tegra_bo_iommu_map(tegra, bo); | |
257 | if (err < 0) { | |
258 | tegra_bo_free(drm, bo); | |
259 | return err; | |
260 | } | |
261 | } else { | |
73c42c79 TR |
262 | size_t size = bo->gem.size; |
263 | ||
f6e45661 LR |
264 | bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr, |
265 | GFP_KERNEL | __GFP_NOWARN); | |
df06b759 TR |
266 | if (!bo->vaddr) { |
267 | dev_err(drm->dev, | |
268 | "failed to allocate buffer of size %zu\n", | |
269 | size); | |
270 | return -ENOMEM; | |
271 | } | |
272 | } | |
273 | ||
274 | return 0; | |
de2ba664 AM |
275 | } |
276 | ||
71c38629 | 277 | struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size, |
773af77f | 278 | unsigned long flags) |
de2ba664 AM |
279 | { |
280 | struct tegra_bo *bo; | |
281 | int err; | |
282 | ||
c28d4a31 TR |
283 | bo = tegra_bo_alloc_object(drm, size); |
284 | if (IS_ERR(bo)) | |
285 | return bo; | |
de2ba664 | 286 | |
73c42c79 | 287 | err = tegra_bo_alloc(drm, bo); |
df06b759 TR |
288 | if (err < 0) |
289 | goto release; | |
de2ba664 | 290 | |
773af77f | 291 | if (flags & DRM_TEGRA_GEM_CREATE_TILED) |
c134f019 | 292 | bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED; |
773af77f | 293 | |
db7fbdfd TR |
294 | if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP) |
295 | bo->flags |= TEGRA_BO_BOTTOM_UP; | |
296 | ||
de2ba664 AM |
297 | return bo; |
298 | ||
df06b759 TR |
299 | release: |
300 | drm_gem_object_release(&bo->gem); | |
de2ba664 | 301 | kfree(bo); |
de2ba664 | 302 | return ERR_PTR(err); |
de2ba664 AM |
303 | } |
304 | ||
/*
 * Create a buffer object and a userspace handle for it. On success the
 * handle holds the only long-term reference: the creation reference is
 * dropped before returning, so the returned pointer is only valid while
 * the handle exists.
 */
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	/* drop the creation reference; the handle keeps the BO alive */
	drm_gem_object_put_unlocked(&bo->gem);

	return bo;
}
328 | ||
540457cc TR |
/*
 * Import a foreign dma-buf as a Tegra GEM object: attach to the buffer,
 * map it for device access and, when an IOMMU is available, map the
 * scatterlist into the domain. Without an IOMMU only single-entry
 * (contiguous) buffers can be accepted.
 */
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	/* hold a dma-buf reference for the lifetime of the BO */
	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		/* no IOMMU: reject non-contiguous buffers */
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	/* bo->sgt may hold an ERR_PTR() when mapping itself failed */
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
383 | ||
de2ba664 AM |
/*
 * Final destructor for a Tegra GEM object: undo the IOMMU mapping,
 * release either the imported dma-buf attachment or the native backing
 * storage, then free the GEM object itself.
 */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		/* imported buffer: hand the sg_table back to the exporter */
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}
403 | ||
404 | int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, | |
405 | struct drm_mode_create_dumb *args) | |
406 | { | |
dc6057ec | 407 | unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); |
d1f3e1e0 | 408 | struct tegra_drm *tegra = drm->dev_private; |
de2ba664 AM |
409 | struct tegra_bo *bo; |
410 | ||
dc6057ec TR |
411 | args->pitch = round_up(min_pitch, tegra->pitch_align); |
412 | args->size = args->pitch * args->height; | |
de2ba664 | 413 | |
773af77f | 414 | bo = tegra_bo_create_with_handle(file, drm, args->size, 0, |
3be82743 | 415 | &args->handle); |
de2ba664 AM |
416 | if (IS_ERR(bo)) |
417 | return PTR_ERR(bo); | |
418 | ||
419 | return 0; | |
420 | } | |
421 | ||
cc7add70 | 422 | static vm_fault_t tegra_bo_fault(struct vm_fault *vmf) |
df06b759 | 423 | { |
11bac800 | 424 | struct vm_area_struct *vma = vmf->vma; |
df06b759 TR |
425 | struct drm_gem_object *gem = vma->vm_private_data; |
426 | struct tegra_bo *bo = to_tegra_bo(gem); | |
427 | struct page *page; | |
428 | pgoff_t offset; | |
df06b759 TR |
429 | |
430 | if (!bo->pages) | |
431 | return VM_FAULT_SIGBUS; | |
432 | ||
1a29d85e | 433 | offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; |
df06b759 TR |
434 | page = bo->pages[offset]; |
435 | ||
cc7add70 | 436 | return vmf_insert_page(vma, vmf->address, page); |
df06b759 TR |
437 | } |
438 | ||
/* VM operations for userspace mappings of Tegra GEM objects */
const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
444 | ||
/*
 * Finish setting up a userspace mapping after drm_gem_mmap() /
 * drm_gem_mmap_obj() has prepared the VMA: contiguous buffers are mapped
 * up front via the DMA API, page-backed buffers are populated lazily by
 * tegra_bo_fault().
 */
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		/* restore the fake offset the DRM core expects */
		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		/* faults insert struct pages, so MIXEDMAP rather than PFNMAP */
		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
3800391d | 480 | |
a8bc8c65 TR |
481 | int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma) |
482 | { | |
483 | struct drm_gem_object *gem; | |
484 | int err; | |
485 | ||
486 | err = drm_gem_mmap(file, vma); | |
487 | if (err < 0) | |
488 | return err; | |
489 | ||
490 | gem = vma->vm_private_data; | |
491 | ||
04c07466 | 492 | return __tegra_gem_mmap(gem, vma); |
a8bc8c65 TR |
493 | } |
494 | ||
3800391d TR |
/*
 * dma-buf .map_dma_buf: build and DMA-map a scatter-gather table for an
 * importer. Page-backed buffers get one entry per page; contiguous
 * buffers are described by a single pre-filled entry. Returns NULL on
 * any failure (per the dma-buf contract of this kernel version).
 */
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		/* dma_map_sg() returns 0 on failure */
		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		/* contiguous: one entry with the known device address */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}
534 | ||
535 | static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, | |
536 | struct sg_table *sgt, | |
537 | enum dma_data_direction dir) | |
538 | { | |
df06b759 TR |
539 | struct drm_gem_object *gem = attach->dmabuf->priv; |
540 | struct tegra_bo *bo = to_tegra_bo(gem); | |
541 | ||
542 | if (bo->pages) | |
543 | dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); | |
544 | ||
3800391d TR |
545 | sg_free_table(sgt); |
546 | kfree(sgt); | |
547 | } | |
548 | ||
/* dma-buf .release: drop the GEM reference held by the exported buffer */
static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}
553 | ||
27e92f1f TR |
554 | static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf, |
555 | enum dma_data_direction direction) | |
556 | { | |
557 | struct drm_gem_object *gem = buf->priv; | |
558 | struct tegra_bo *bo = to_tegra_bo(gem); | |
559 | struct drm_device *drm = gem->dev; | |
560 | ||
561 | if (bo->pages) | |
562 | dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents, | |
563 | DMA_FROM_DEVICE); | |
564 | ||
565 | return 0; | |
566 | } | |
567 | ||
568 | static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf, | |
569 | enum dma_data_direction direction) | |
570 | { | |
571 | struct drm_gem_object *gem = buf->priv; | |
572 | struct tegra_bo *bo = to_tegra_bo(gem); | |
573 | struct drm_device *drm = gem->dev; | |
574 | ||
575 | if (bo->pages) | |
576 | dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents, | |
577 | DMA_TO_DEVICE); | |
578 | ||
579 | return 0; | |
580 | } | |
581 | ||
3800391d TR |
/* dma-buf per-page kernel mapping is not supported by this driver */
static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}
586 | ||
/* nothing to undo: tegra_gem_prime_kmap() never maps anything */
static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}
591 | ||
592 | static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma) | |
593 | { | |
a8bc8c65 TR |
594 | struct drm_gem_object *gem = buf->priv; |
595 | int err; | |
596 | ||
597 | err = drm_gem_mmap_obj(gem, gem->size, vma); | |
598 | if (err < 0) | |
599 | return err; | |
600 | ||
04c07466 | 601 | return __tegra_gem_mmap(gem, vma); |
3800391d TR |
602 | } |
603 | ||
d40326f4 TR |
604 | static void *tegra_gem_prime_vmap(struct dma_buf *buf) |
605 | { | |
606 | struct drm_gem_object *gem = buf->priv; | |
607 | struct tegra_bo *bo = to_tegra_bo(gem); | |
608 | ||
609 | return bo->vaddr; | |
610 | } | |
611 | ||
/* nothing to do: .vmap only returned a pre-existing mapping */
static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}
615 | ||
3800391d TR |
/* dma-buf operations for buffers exported by this driver */
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.map = tegra_gem_prime_kmap,
	.unmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};
628 | ||
/*
 * Export a GEM object as a dma-buf using the driver's own dma_buf_ops.
 * The GEM object pointer is stashed in the dma-buf's priv field.
 */
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = drm->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(drm, &exp_info);
}
644 | ||
/*
 * Import a dma-buf as a GEM object. A buffer that this driver itself
 * exported on the same device is short-circuited to its original GEM
 * object with an extra reference; anything else goes through
 * tegra_bo_import().
 */
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		/* self-import: just take another reference */
		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}