]>
Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
de2ba664 AM |
2 | /* |
3 | * NVIDIA Tegra DRM GEM helper functions | |
4 | * | |
5 | * Copyright (C) 2012 Sascha Hauer, Pengutronix | |
7ecada3c | 6 | * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved. |
de2ba664 AM |
7 | * |
8 | * Based on the GEM/CMA helpers | |
9 | * | |
10 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | |
de2ba664 AM |
11 | */ |
12 | ||
3800391d | 13 | #include <linux/dma-buf.h> |
df06b759 | 14 | #include <linux/iommu.h> |
eb1df694 SR |
15 | |
16 | #include <drm/drm_drv.h> | |
17 | #include <drm/drm_prime.h> | |
773af77f TR |
18 | #include <drm/tegra_drm.h> |
19 | ||
d1f3e1e0 | 20 | #include "drm.h" |
de2ba664 AM |
21 | #include "gem.h" |
22 | ||
de2ba664 AM |
23 | static void tegra_bo_put(struct host1x_bo *bo) |
24 | { | |
3be82743 | 25 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); |
de2ba664 | 26 | |
7664b2fa | 27 | drm_gem_object_put_unlocked(&obj->gem); |
de2ba664 AM |
28 | } |
29 | ||
1f16deac TR |
30 | /* XXX move this into lib/scatterlist.c? */ |
31 | static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg, | |
32 | unsigned int nents, gfp_t gfp_mask) | |
33 | { | |
34 | struct scatterlist *dst; | |
35 | unsigned int i; | |
36 | int err; | |
37 | ||
38 | err = sg_alloc_table(sgt, nents, gfp_mask); | |
39 | if (err < 0) | |
40 | return err; | |
41 | ||
42 | dst = sgt->sgl; | |
43 | ||
44 | for (i = 0; i < nents; i++) { | |
45 | sg_set_page(dst, sg_page(sg), sg->length, 0); | |
46 | dst = sg_next(dst); | |
47 | sg = sg_next(sg); | |
48 | } | |
49 | ||
50 | return 0; | |
51 | } | |
52 | ||
80327ce3 TR |
/*
 * host1x_bo_ops.pin: hand host1x something it can use to address the buffer.
 *
 * Returns NULL when the caller passed @phys (the existing IOVA/physical
 * address is written there), otherwise returns a freshly allocated SG table
 * that host1x will map via the DMA API, or an ERR_PTR on failure. The caller
 * releases the table through tegra_bo_unpin().
 */
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If we've manually mapped the buffer object through the IOMMU, make
	 * sure to return the IOVA address of our mapping.
	 *
	 * Similarly, for buffers that have been allocated by the DMA API the
	 * physical address can be used for devices that are not attached to
	 * an IOMMU. For these devices, callers must pass a valid pointer via
	 * the @phys argument.
	 *
	 * Imported buffers were also already mapped at import time, so the
	 * existing mapping can be reused.
	 */
	if (phys) {
		*phys = obj->iova;
		return NULL;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else if (obj->sgt) {
		/*
		 * If the buffer object already has an SG table but no pages
		 * were allocated for it, it means the buffer was imported and
		 * the SG table needs to be copied to avoid overwriting any
		 * other potential users of the original SG table.
		 */
		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
					     GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}
123 | ||
/* host1x_bo_ops.unpin: release the SG table handed out by tegra_bo_pin(). */
static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	/* tegra_bo_pin() returns NULL when @phys was used; nothing to free */
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);
}
131 | ||
132 | static void *tegra_bo_mmap(struct host1x_bo *bo) | |
133 | { | |
3be82743 | 134 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); |
de2ba664 | 135 | |
7ecada3c AM |
136 | if (obj->vaddr) |
137 | return obj->vaddr; | |
138 | else if (obj->gem.import_attach) | |
139 | return dma_buf_vmap(obj->gem.import_attach->dmabuf); | |
140 | else | |
141 | return vmap(obj->pages, obj->num_pages, VM_MAP, | |
142 | pgprot_writecombine(PAGE_KERNEL)); | |
de2ba664 AM |
143 | } |
144 | ||
145 | static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) | |
146 | { | |
7ecada3c AM |
147 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); |
148 | ||
149 | if (obj->vaddr) | |
150 | return; | |
151 | else if (obj->gem.import_attach) | |
152 | dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr); | |
153 | else | |
154 | vunmap(addr); | |
de2ba664 AM |
155 | } |
156 | ||
de2ba664 AM |
157 | static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo) |
158 | { | |
3be82743 | 159 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); |
de2ba664 | 160 | |
7664b2fa | 161 | drm_gem_object_get(&obj->gem); |
de2ba664 AM |
162 | |
163 | return bo; | |
164 | } | |
165 | ||
/* host1x buffer-object operations backed by Tegra DRM GEM objects. */
static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};
174 | ||
df06b759 TR |
/*
 * Map @bo's SG table into the Tegra DRM IOMMU domain.
 *
 * Reserves an IOVA range from tegra->mm (under mm_lock) and maps the
 * buffer's scatterlist into it with read/write permission. On success,
 * bo->iova and bo->size describe the mapping. Returns 0 or a negative
 * error code; -EBUSY if the buffer is already mapped.
 */
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	/* mm_lock serializes IOVA allocations against tegra_bo_iommu_unmap() */
	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	/* iommu_map_sg() returns the number of bytes mapped, 0 on failure */
	bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}
218 | ||
/* Undo tegra_bo_iommu_map(): unmap the IOVA range and release the node. */
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	/* never mapped (or already unmapped) — nothing to do */
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}
233 | ||
c28d4a31 TR |
/*
 * Allocate and initialize a bare Tegra GEM object of @size bytes (rounded
 * up to page granularity): zeroed struct, host1x BO wrapper, DRM GEM init
 * and mmap offset. No backing storage is allocated here. Returns the new
 * object or an ERR_PTR on failure.
 */
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}
263 | ||
df06b759 | 264 | static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) |
de2ba664 | 265 | { |
df06b759 | 266 | if (bo->pages) { |
bd43c9f0 | 267 | dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents, |
61b51fb5 | 268 | DMA_FROM_DEVICE); |
df06b759 TR |
269 | drm_gem_put_pages(&bo->gem, bo->pages, true, true); |
270 | sg_free_table(bo->sgt); | |
271 | kfree(bo->sgt); | |
7e0180e3 | 272 | } else if (bo->vaddr) { |
7e3c53a0 | 273 | dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova); |
df06b759 TR |
274 | } |
275 | } | |
276 | ||
/*
 * Back @bo with shmem pages: fetch the pages, build an SG table for them
 * and map it for DMA. On success bo->pages, bo->num_pages and bo->sgt are
 * set; on failure everything is unwound and a negative error is returned.
 */
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	/* gem.size was page-aligned in tegra_bo_alloc_object() */
	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	/* dma_map_sg() returns the number of mapped entries, 0 on failure */
	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}
309 | ||
73c42c79 | 310 | static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) |
df06b759 TR |
311 | { |
312 | struct tegra_drm *tegra = drm->dev_private; | |
313 | int err; | |
314 | ||
315 | if (tegra->domain) { | |
73c42c79 | 316 | err = tegra_bo_get_pages(drm, bo); |
df06b759 TR |
317 | if (err < 0) |
318 | return err; | |
319 | ||
320 | err = tegra_bo_iommu_map(tegra, bo); | |
321 | if (err < 0) { | |
322 | tegra_bo_free(drm, bo); | |
323 | return err; | |
324 | } | |
325 | } else { | |
73c42c79 TR |
326 | size_t size = bo->gem.size; |
327 | ||
7e3c53a0 | 328 | bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova, |
f6e45661 | 329 | GFP_KERNEL | __GFP_NOWARN); |
df06b759 TR |
330 | if (!bo->vaddr) { |
331 | dev_err(drm->dev, | |
332 | "failed to allocate buffer of size %zu\n", | |
333 | size); | |
334 | return -ENOMEM; | |
335 | } | |
336 | } | |
337 | ||
338 | return 0; | |
de2ba664 AM |
339 | } |
340 | ||
71c38629 | 341 | struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size, |
773af77f | 342 | unsigned long flags) |
de2ba664 AM |
343 | { |
344 | struct tegra_bo *bo; | |
345 | int err; | |
346 | ||
c28d4a31 TR |
347 | bo = tegra_bo_alloc_object(drm, size); |
348 | if (IS_ERR(bo)) | |
349 | return bo; | |
de2ba664 | 350 | |
73c42c79 | 351 | err = tegra_bo_alloc(drm, bo); |
df06b759 TR |
352 | if (err < 0) |
353 | goto release; | |
de2ba664 | 354 | |
773af77f | 355 | if (flags & DRM_TEGRA_GEM_CREATE_TILED) |
c134f019 | 356 | bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED; |
773af77f | 357 | |
db7fbdfd TR |
358 | if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP) |
359 | bo->flags |= TEGRA_BO_BOTTOM_UP; | |
360 | ||
de2ba664 AM |
361 | return bo; |
362 | ||
df06b759 TR |
363 | release: |
364 | drm_gem_object_release(&bo->gem); | |
de2ba664 | 365 | kfree(bo); |
de2ba664 | 366 | return ERR_PTR(err); |
de2ba664 AM |
367 | } |
368 | ||
369 | struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, | |
3be82743 | 370 | struct drm_device *drm, |
71c38629 | 371 | size_t size, |
773af77f | 372 | unsigned long flags, |
71c38629 | 373 | u32 *handle) |
de2ba664 AM |
374 | { |
375 | struct tegra_bo *bo; | |
a8b48df5 | 376 | int err; |
de2ba664 | 377 | |
773af77f | 378 | bo = tegra_bo_create(drm, size, flags); |
de2ba664 AM |
379 | if (IS_ERR(bo)) |
380 | return bo; | |
381 | ||
a8b48df5 TR |
382 | err = drm_gem_handle_create(file, &bo->gem, handle); |
383 | if (err) { | |
384 | tegra_bo_free_object(&bo->gem); | |
385 | return ERR_PTR(err); | |
386 | } | |
de2ba664 | 387 | |
7664b2fa | 388 | drm_gem_object_put_unlocked(&bo->gem); |
de2ba664 AM |
389 | |
390 | return bo; | |
de2ba664 AM |
391 | } |
392 | ||
540457cc TR |
/*
 * Import a foreign dma-buf: wrap it in a GEM object, attach to the
 * exporter and map the buffer for DMA. If the Tegra DRM device has an
 * explicit IOMMU domain, the buffer is also mapped into that domain.
 * Returns the new object or an ERR_PTR.
 */
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	/* hold a dma-buf reference for the lifetime of the GEM object */
	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	/* bo->sgt holds an ERR_PTR if the mapping itself failed */
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
440 | ||
de2ba664 AM |
/* Final teardown of a GEM object once its last reference is dropped. */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		/* imported buffer: release the exporter's mapping/attachment */
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		/* locally allocated buffer: free the backing storage */
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}
460 | ||
461 | int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, | |
462 | struct drm_mode_create_dumb *args) | |
463 | { | |
dc6057ec | 464 | unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); |
d1f3e1e0 | 465 | struct tegra_drm *tegra = drm->dev_private; |
de2ba664 AM |
466 | struct tegra_bo *bo; |
467 | ||
dc6057ec TR |
468 | args->pitch = round_up(min_pitch, tegra->pitch_align); |
469 | args->size = args->pitch * args->height; | |
de2ba664 | 470 | |
773af77f | 471 | bo = tegra_bo_create_with_handle(file, drm, args->size, 0, |
3be82743 | 472 | &args->handle); |
de2ba664 AM |
473 | if (IS_ERR(bo)) |
474 | return PTR_ERR(bo); | |
475 | ||
476 | return 0; | |
477 | } | |
478 | ||
cc7add70 | 479 | static vm_fault_t tegra_bo_fault(struct vm_fault *vmf) |
df06b759 | 480 | { |
11bac800 | 481 | struct vm_area_struct *vma = vmf->vma; |
df06b759 TR |
482 | struct drm_gem_object *gem = vma->vm_private_data; |
483 | struct tegra_bo *bo = to_tegra_bo(gem); | |
484 | struct page *page; | |
485 | pgoff_t offset; | |
df06b759 TR |
486 | |
487 | if (!bo->pages) | |
488 | return VM_FAULT_SIGBUS; | |
489 | ||
1a29d85e | 490 | offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; |
df06b759 TR |
491 | page = bo->pages[offset]; |
492 | ||
cc7add70 | 493 | return vmf_insert_page(vma, vmf->address, page); |
df06b759 TR |
494 | } |
495 | ||
/* VM operations for mmap'ed GEM buffers; faults populate shmem pages. */
const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
501 | ||
/*
 * Set up a userspace mapping for @gem on an already-validated VMA.
 * Contiguous (DMA API) buffers are mapped up front via dma_mmap_wc();
 * shmem-backed buffers are configured for lazy per-page population
 * through tegra_bo_fault().
 */
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		/* restore the fake offset consumed above */
		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		/* pages are inserted individually by tegra_bo_fault() */
		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
3800391d | 537 | |
a8bc8c65 TR |
538 | int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma) |
539 | { | |
540 | struct drm_gem_object *gem; | |
541 | int err; | |
542 | ||
543 | err = drm_gem_mmap(file, vma); | |
544 | if (err < 0) | |
545 | return err; | |
546 | ||
547 | gem = vma->vm_private_data; | |
548 | ||
04c07466 | 549 | return __tegra_gem_mmap(gem, vma); |
a8bc8c65 TR |
550 | } |
551 | ||
3800391d TR |
/*
 * dma_buf_ops.map_dma_buf: build an SG table describing the buffer and
 * map it for DMA to the importing device. Returns NULL on failure (per
 * the historical dma-buf convention used by this driver).
 */
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		/* shmem-backed buffer: describe the individual pages */
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		/* contiguous DMA allocation: let the DMA API describe it */
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	/* dma_map_sg() returns 0 on failure */
	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}
584 | ||
585 | static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, | |
586 | struct sg_table *sgt, | |
587 | enum dma_data_direction dir) | |
588 | { | |
df06b759 TR |
589 | struct drm_gem_object *gem = attach->dmabuf->priv; |
590 | struct tegra_bo *bo = to_tegra_bo(gem); | |
591 | ||
592 | if (bo->pages) | |
593 | dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); | |
594 | ||
3800391d TR |
595 | sg_free_table(sgt); |
596 | kfree(sgt); | |
597 | } | |
598 | ||
/* dma_buf_ops.release: drop the GEM reference held by the exported dma-buf. */
static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}
603 | ||
27e92f1f TR |
604 | static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf, |
605 | enum dma_data_direction direction) | |
606 | { | |
607 | struct drm_gem_object *gem = buf->priv; | |
608 | struct tegra_bo *bo = to_tegra_bo(gem); | |
609 | struct drm_device *drm = gem->dev; | |
610 | ||
611 | if (bo->pages) | |
612 | dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents, | |
613 | DMA_FROM_DEVICE); | |
614 | ||
615 | return 0; | |
616 | } | |
617 | ||
618 | static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf, | |
619 | enum dma_data_direction direction) | |
620 | { | |
621 | struct drm_gem_object *gem = buf->priv; | |
622 | struct tegra_bo *bo = to_tegra_bo(gem); | |
623 | struct drm_device *drm = gem->dev; | |
624 | ||
625 | if (bo->pages) | |
626 | dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents, | |
627 | DMA_TO_DEVICE); | |
628 | ||
629 | return 0; | |
630 | } | |
631 | ||
3800391d TR |
632 | static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma) |
633 | { | |
a8bc8c65 TR |
634 | struct drm_gem_object *gem = buf->priv; |
635 | int err; | |
636 | ||
637 | err = drm_gem_mmap_obj(gem, gem->size, vma); | |
638 | if (err < 0) | |
639 | return err; | |
640 | ||
04c07466 | 641 | return __tegra_gem_mmap(gem, vma); |
3800391d TR |
642 | } |
643 | ||
d40326f4 TR |
644 | static void *tegra_gem_prime_vmap(struct dma_buf *buf) |
645 | { | |
646 | struct drm_gem_object *gem = buf->priv; | |
647 | struct tegra_bo *bo = to_tegra_bo(gem); | |
648 | ||
649 | return bo->vaddr; | |
650 | } | |
651 | ||
/* No-op: vmap hands out the long-lived kernel mapping, nothing to undo. */
static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}
655 | ||
3800391d TR |
/* dma-buf exporter callbacks for Tegra GEM objects. */
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};
666 | ||
/* Export @gem as a dma-buf using the Tegra-specific exporter callbacks. */
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}
681 | ||
682 | struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm, | |
683 | struct dma_buf *buf) | |
684 | { | |
685 | struct tegra_bo *bo; | |
686 | ||
687 | if (buf->ops == &tegra_gem_prime_dmabuf_ops) { | |
688 | struct drm_gem_object *gem = buf->priv; | |
689 | ||
690 | if (gem->dev == drm) { | |
7664b2fa | 691 | drm_gem_object_get(gem); |
3800391d TR |
692 | return gem; |
693 | } | |
694 | } | |
695 | ||
696 | bo = tegra_bo_import(drm, buf); | |
697 | if (IS_ERR(bo)) | |
698 | return ERR_CAST(bo); | |
699 | ||
700 | return &bo->gem; | |
701 | } |