]>
Commit | Line | Data |
---|---|---|
// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */
12 | ||
#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"
19 | ||
/* host1x_bo_ops.put: drop the GEM reference that backs a host1x BO. */
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put_unlocked(&obj->gem);
}
26 | ||
/*
 * host1x_bo_ops.pin: hand out the buffer's scatter-gather table and return
 * its device address (IOVA when an IOMMU domain is used, physical address
 * otherwise). No resources are acquired here, so tegra_bo_unpin() is a no-op.
 */
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	*sgt = obj->sgt;

	return obj->paddr;
}
35 | ||
/* host1x_bo_ops.unpin: nothing to release since tegra_bo_pin() takes nothing. */
static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}
39 | ||
40 | static void *tegra_bo_mmap(struct host1x_bo *bo) | |
41 | { | |
42 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); | |
43 | ||
44 | if (obj->vaddr) | |
45 | return obj->vaddr; | |
46 | else if (obj->gem.import_attach) | |
47 | return dma_buf_vmap(obj->gem.import_attach->dmabuf); | |
48 | else | |
49 | return vmap(obj->pages, obj->num_pages, VM_MAP, | |
50 | pgprot_writecombine(PAGE_KERNEL)); | |
51 | } | |
52 | ||
53 | static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) | |
54 | { | |
55 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); | |
56 | ||
57 | if (obj->vaddr) | |
58 | return; | |
59 | else if (obj->gem.import_attach) | |
60 | dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr); | |
61 | else | |
62 | vunmap(addr); | |
63 | } | |
64 | ||
65 | static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page) | |
66 | { | |
67 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); | |
68 | ||
69 | if (obj->vaddr) | |
70 | return obj->vaddr + page * PAGE_SIZE; | |
71 | else if (obj->gem.import_attach) | |
72 | return dma_buf_kmap(obj->gem.import_attach->dmabuf, page); | |
73 | else | |
74 | return vmap(obj->pages + page, 1, VM_MAP, | |
75 | pgprot_writecombine(PAGE_KERNEL)); | |
76 | } | |
77 | ||
78 | static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page, | |
79 | void *addr) | |
80 | { | |
81 | struct tegra_bo *obj = host1x_to_tegra_bo(bo); | |
82 | ||
83 | if (obj->vaddr) | |
84 | return; | |
85 | else if (obj->gem.import_attach) | |
86 | dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr); | |
87 | else | |
88 | vunmap(addr); | |
89 | } | |
90 | ||
/* host1x_bo_ops.get: take an additional GEM reference on the BO. */
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}
99 | ||
/* host1x buffer-object operations backed by Tegra DRM GEM objects. */
static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};
110 | ||
111 | static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) | |
112 | { | |
113 | int prot = IOMMU_READ | IOMMU_WRITE; | |
114 | int err; | |
115 | ||
116 | if (bo->mm) | |
117 | return -EBUSY; | |
118 | ||
119 | bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL); | |
120 | if (!bo->mm) | |
121 | return -ENOMEM; | |
122 | ||
123 | mutex_lock(&tegra->mm_lock); | |
124 | ||
125 | err = drm_mm_insert_node_generic(&tegra->mm, | |
126 | bo->mm, bo->gem.size, PAGE_SIZE, 0, 0); | |
127 | if (err < 0) { | |
128 | dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n", | |
129 | err); | |
130 | goto unlock; | |
131 | } | |
132 | ||
133 | bo->paddr = bo->mm->start; | |
134 | ||
135 | bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl, | |
136 | bo->sgt->nents, prot); | |
137 | if (!bo->size) { | |
138 | dev_err(tegra->drm->dev, "failed to map buffer\n"); | |
139 | err = -ENOMEM; | |
140 | goto remove; | |
141 | } | |
142 | ||
143 | mutex_unlock(&tegra->mm_lock); | |
144 | ||
145 | return 0; | |
146 | ||
147 | remove: | |
148 | drm_mm_remove_node(bo->mm); | |
149 | unlock: | |
150 | mutex_unlock(&tegra->mm_lock); | |
151 | kfree(bo->mm); | |
152 | return err; | |
153 | } | |
154 | ||
155 | static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo) | |
156 | { | |
157 | if (!bo->mm) | |
158 | return 0; | |
159 | ||
160 | mutex_lock(&tegra->mm_lock); | |
161 | iommu_unmap(tegra->domain, bo->paddr, bo->size); | |
162 | drm_mm_remove_node(bo->mm); | |
163 | mutex_unlock(&tegra->mm_lock); | |
164 | ||
165 | kfree(bo->mm); | |
166 | ||
167 | return 0; | |
168 | } | |
169 | ||
/*
 * Allocate and initialize a bare &struct tegra_bo of @size bytes (rounded
 * up to page granularity): GEM object init plus mmap offset, but no backing
 * storage yet. Returns the new object or an ERR_PTR() on failure.
 */
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	/* fake offset used by userspace to mmap() the buffer */
	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}
199 | ||
/*
 * Release the backing storage of @bo: either the shmem page array set up by
 * tegra_bo_get_pages() or the contiguous write-combined allocation from
 * tegra_bo_alloc(). Does not free the tegra_bo structure itself.
 */
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
	}
}
212 | ||
/*
 * Back @bo with shmem pages: acquire the page array, build a scatter-gather
 * table from it and DMA-map the table. Used when an IOMMU domain is present
 * and physically contiguous memory is not required.
 * Returns 0 on success or a negative errno.
 */
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	/* dma_map_sg() returns the number of mapped entries, 0 on failure */
	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}
245 | ||
/*
 * Allocate backing storage for @bo. With an IOMMU domain, use discontiguous
 * shmem pages and map them through the IOMMU; without one, fall back to a
 * physically contiguous write-combined DMA allocation.
 * Returns 0 on success or a negative errno.
 */
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}
276 | ||
/*
 * tegra_bo_create - allocate a fully backed Tegra GEM buffer object
 * @drm: DRM device
 * @size: requested size in bytes (rounded up to page granularity)
 * @flags: DRM_TEGRA_GEM_CREATE_* flags
 *
 * Returns the new object (one reference held by the caller) or an
 * ERR_PTR() on failure.
 */
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
304 | ||
/*
 * tegra_bo_create_with_handle - create a buffer object and a userspace handle
 * @file: DRM file the handle is created for
 * @drm: DRM device
 * @size: requested size in bytes
 * @flags: DRM_TEGRA_GEM_CREATE_* flags
 * @handle: out parameter for the newly created GEM handle
 *
 * On success the only reference to the object is held by @handle; the
 * returned pointer is borrowed and becomes invalid once the handle is
 * closed. Returns an ERR_PTR() on failure.
 */
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	/* drop the creation reference; the handle now owns the object */
	drm_gem_object_put_unlocked(&bo->gem);

	return bo;
}
328 | ||
/*
 * Wrap a foreign dma-buf in a Tegra GEM object: attach to the buffer, map
 * it for device access and either run it through the IOMMU or, without an
 * IOMMU domain, require a single contiguous chunk. Returns the new object
 * or an ERR_PTR() on failure.
 */
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	/* hold a dma-buf reference for the lifetime of the attachment */
	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		/* without an IOMMU the buffer must be physically contiguous */
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	/* bo->sgt may still be an ERR_PTR() if the mapping itself failed */
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
383 | ||
/*
 * tegra_bo_free_object - final unreference callback for Tegra GEM objects
 * @gem: GEM object whose last reference was dropped
 *
 * Tears down the IOMMU mapping (if any), then releases either the imported
 * dma-buf attachment or the locally allocated backing storage, and finally
 * frees the object itself.
 */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}
403 | ||
/*
 * tegra_bo_dumb_create - DRM dumb-buffer creation callback
 * @file: DRM file to create the handle for
 * @drm: DRM device
 * @args: width/height/bpp in, pitch/size/handle out
 *
 * Computes a pitch aligned to the device requirement and allocates a
 * buffer with a userspace handle.
 * NOTE(review): pitch * height is a 32-bit multiply; this presumably relies
 * on the DRM core's dumb-buffer overflow checks — confirm.
 */
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}
421 | ||
422 | static vm_fault_t tegra_bo_fault(struct vm_fault *vmf) | |
423 | { | |
424 | struct vm_area_struct *vma = vmf->vma; | |
425 | struct drm_gem_object *gem = vma->vm_private_data; | |
426 | struct tegra_bo *bo = to_tegra_bo(gem); | |
427 | struct page *page; | |
428 | pgoff_t offset; | |
429 | ||
430 | if (!bo->pages) | |
431 | return VM_FAULT_SIGBUS; | |
432 | ||
433 | offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; | |
434 | page = bo->pages[offset]; | |
435 | ||
436 | return vmf_insert_page(vma, vmf->address, page); | |
437 | } | |
438 | ||
/* VM operations for userspace mappings of Tegra GEM buffers. */
const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
444 | ||
/*
 * __tegra_gem_mmap - set up a userspace mapping for a GEM object
 * @gem: object to map
 * @vma: VMA prepared by drm_gem_mmap() or drm_gem_mmap_obj()
 *
 * Contiguous buffers are mapped immediately via dma_mmap_wc(); page-backed
 * buffers are mapped lazily through tegra_bo_fault(), so only the VMA flags
 * and page protection are adjusted here. Returns 0 or a negative errno.
 */
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		/* restore the fake offset for any later users of the VMA */
		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		/* pages are inserted one at a time from the fault handler */
		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
480 | ||
481 | int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma) | |
482 | { | |
483 | struct drm_gem_object *gem; | |
484 | int err; | |
485 | ||
486 | err = drm_gem_mmap(file, vma); | |
487 | if (err < 0) | |
488 | return err; | |
489 | ||
490 | gem = vma->vm_private_data; | |
491 | ||
492 | return __tegra_gem_mmap(gem, vma); | |
493 | } | |
494 | ||
/*
 * dma_buf_ops.map_dma_buf: build and DMA-map a scatter-gather table for an
 * importer. Page-backed buffers get one entry per page; contiguous buffers
 * are described by a single pre-resolved entry (no dma_map_sg() call).
 * Returns the table or NULL on failure.
 */
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		/* contiguous buffer: hand out the known device address */
		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}
534 | ||
535 | static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, | |
536 | struct sg_table *sgt, | |
537 | enum dma_data_direction dir) | |
538 | { | |
539 | struct drm_gem_object *gem = attach->dmabuf->priv; | |
540 | struct tegra_bo *bo = to_tegra_bo(gem); | |
541 | ||
542 | if (bo->pages) | |
543 | dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); | |
544 | ||
545 | sg_free_table(sgt); | |
546 | kfree(sgt); | |
547 | } | |
548 | ||
/* dma_buf_ops.release: drop the GEM reference held by the exported dma-buf. */
static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}
553 | ||
554 | static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf, | |
555 | enum dma_data_direction direction) | |
556 | { | |
557 | struct drm_gem_object *gem = buf->priv; | |
558 | struct tegra_bo *bo = to_tegra_bo(gem); | |
559 | struct drm_device *drm = gem->dev; | |
560 | ||
561 | if (bo->pages) | |
562 | dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents, | |
563 | DMA_FROM_DEVICE); | |
564 | ||
565 | return 0; | |
566 | } | |
567 | ||
568 | static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf, | |
569 | enum dma_data_direction direction) | |
570 | { | |
571 | struct drm_gem_object *gem = buf->priv; | |
572 | struct tegra_bo *bo = to_tegra_bo(gem); | |
573 | struct drm_device *drm = gem->dev; | |
574 | ||
575 | if (bo->pages) | |
576 | dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents, | |
577 | DMA_TO_DEVICE); | |
578 | ||
579 | return 0; | |
580 | } | |
581 | ||
/* dma_buf_ops.map: per-page kernel mapping is not supported for exports. */
static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}
586 | ||
/* dma_buf_ops.unmap: no-op counterpart of tegra_gem_prime_kmap(). */
static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}
591 | ||
592 | static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma) | |
593 | { | |
594 | struct drm_gem_object *gem = buf->priv; | |
595 | int err; | |
596 | ||
597 | err = drm_gem_mmap_obj(gem, gem->size, vma); | |
598 | if (err < 0) | |
599 | return err; | |
600 | ||
601 | return __tegra_gem_mmap(gem, vma); | |
602 | } | |
603 | ||
/*
 * dma_buf_ops.vmap: return the permanent kernel mapping, which only exists
 * for contiguous buffers (bo->vaddr is NULL for page-backed ones).
 */
static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}
611 | ||
/* dma_buf_ops.vunmap: the vmap is permanent, so there is nothing to undo. */
static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}
615 | ||
/* dma-buf operations for buffers exported by this driver. */
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.map = tegra_gem_prime_kmap,
	.unmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};
628 | ||
/*
 * tegra_gem_prime_export - export a GEM object as a dma-buf
 * @drm: DRM device
 * @gem: object to export
 * @flags: O_* flags for the dma-buf file descriptor
 *
 * Returns the new dma-buf or an ERR_PTR() on failure.
 */
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = drm->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(drm, &exp_info);
}
644 | ||
/*
 * tegra_gem_prime_import - import a dma-buf as a GEM object
 * @drm: DRM device
 * @buf: dma-buf to import
 *
 * A buffer that was exported by this same device is short-circuited by
 * taking a reference on the underlying GEM object; anything else is wrapped
 * in a new object via tegra_bo_import(). Returns an ERR_PTR() on failure.
 */
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		/* self-import: reuse our own object instead of re-wrapping */
		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}