/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */
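
/*
 * For orientation, a minimal userspace round trip over these callbacks,
 * using the libdrm PRIME helpers. This is an illustrative sketch only;
 * the device paths and the exporter-side handle 'handle0' are assumptions:
 *
 *	int fd0 = open("/dev/dri/card0", O_RDWR);	// exporting device
 *	int fd1 = open("/dev/dri/card1", O_RDWR);	// importing device
 *	uint32_t handle1;
 *	int prime_fd;
 *
 *	// Turn a GEM handle on fd0 into a dma-buf fd ...
 *	drmPrimeHandleToFD(fd0, handle0, DRM_CLOEXEC | DRM_RDWR, &prime_fd);
 *	// ... and import it on fd1, yielding a local GEM handle.
 *	drmPrimeFDToHandle(fd1, prime_fd, &handle1);
 */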
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
/* forward declaration: the import paths below compare against these ops */
static const struct dma_buf_ops amdgpu_dmabuf_ops;
/**
 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 * implementation
 * @obj: GEM buffer object (BO)
 *
 * Returns:
 * A scatter/gather table for the pinned pages of the BO's memory.
 */
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
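
/*
 * An importer typically consumes the table returned above after DMA-mapping
 * it; a minimal sketch, assuming a mapped table 'sgt':
 *
 *	struct scatterlist *sgl;
 *	unsigned int i;
 *
 *	for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
 *		dma_addr_t addr = sg_dma_address(sgl);
 *		unsigned int len = sg_dma_len(sgl);
 *		// program addr/len into the importer's translation tables
 *	}
 */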
/**
 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 * @obj: GEM BO
 *
 * Sets up an in-kernel virtual mapping of the BO's memory.
 *
 * Returns:
 * The virtual address of the mapping or an error pointer.
 */
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}
/**
 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
 * @obj: GEM BO
 * @vaddr: Virtual address (unused)
 *
 * Tears down the in-kernel virtual mapping of the BO's memory.
 */
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}
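
/*
 * These two callbacks back the dma_buf_vmap()/dma_buf_vunmap() pair; a
 * hedged sketch of a kernel importer's usage ('dmabuf' is an assumed,
 * already imported &struct dma_buf):
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (vaddr) {
 *		// CPU access through vaddr
 *		dma_buf_vunmap(dmabuf, vaddr);
 *	}
 */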
/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM BO
 * @vma: Virtual memory area
 *
 * Sets up a userspace mapping of the BO's memory in the given
 * virtual memory area.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}
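
/*
 * The userspace counterpart simply mmap()s the dma-buf fd; an illustrative
 * sketch, where 'prime_fd' and 'size' are assumptions:
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 prime_fd, 0);
 *
 *	if (ptr != MAP_FAILED) {
 *		// CPU access through ptr
 *		munmap(ptr, size);
 *	}
 */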
/**
 * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
 * implementation
 * @dev: DRM device
 * @attach: DMA-buf attachment
 * @sg: Scatter/gather table
 *
 * Imports shared DMA buffer memory exported by another device.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf attachment and scatter/gather table.
 */
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int ret;

	memset(&bp, 0, sizeof(bp));
	bp.size = attach->dmabuf->size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.flags = 0;
	bp.type = ttm_bo_type_sg;
	bp.resv = resv;
	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->tbo.sg = sg;
	bo->tbo.ttm->sg = sg;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	ww_mutex_unlock(&resv->lock);
	return &bo->gem_base;

error:
	ww_mutex_unlock(&resv->lock);
	return ERR_PTR(ret);
}
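
/*
 * For context, the &drm_gem_prime_import helper reaches the function above
 * through roughly this sequence (a simplified sketch of the helper's flow,
 * not a verbatim copy):
 *
 *	attach = dma_buf_attach(dma_buf, dev->dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
 */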
/**
 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
 * @dma_buf: Shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply pins it to the GTT domain, where it should be accessible by
 * all DMA devices.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	long r;

	r = drm_gem_map_attach(dma_buf, attach);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto error_detach;

	if (attach->dev->driver != adev->dev->driver) {
		/*
		 * Wait for all shared fences to complete before we switch to future
		 * use of exclusive fence on this prime shared bo.
		 */
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							MAX_SCHEDULE_TIMEOUT);
		if (unlikely(r < 0)) {
			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
			goto error_unreserve;
		}
	}

	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		goto error_unreserve;

	if (attach->dev->driver != adev->dev->driver)
		bo->prime_shared_count++;

error_unreserve:
	amdgpu_bo_unreserve(bo);

error_detach:
	if (r)
		drm_gem_map_detach(dma_buf, attach);
	return r;
}
/**
 * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
 * @dma_buf: Shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. For now, simply unpins the buffer from GTT.
 */
static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, true);
	if (unlikely(ret != 0))
		goto error;

	amdgpu_bo_unpin(bo);
	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);

error:
	drm_gem_map_detach(dma_buf, attach);
}
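
/*
 * The corresponding teardown on the importing side, which ends up in the
 * .detach callback above (sketch; 'dmabuf', 'attach' and 'sgt' are assumed
 * to come from the attach/map sequence shown earlier):
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 */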
/**
 * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
 * @obj: GEM BO
 *
 * Returns:
 * The BO's reservation object.
 */
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}
/**
 * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for
 * optimal CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}
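
/*
 * Userspace reaches this callback via the DMA_BUF_IOCTL_SYNC ioctl; a hedged
 * sketch of bracketing a CPU read of a mapped dma-buf ('prime_fd' assumed):
 *
 *	struct dma_buf_sync sync = {
 *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
 *	};
 *
 *	ioctl(prime_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	// CPU reads from the mapping
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
 *	ioctl(prime_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */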
static const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_gem_map_attach,
	.detach = amdgpu_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
	.map = drm_gem_dmabuf_kmap,
	.unmap = drm_gem_dmabuf_kunmap,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @dev: DRM device
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper, which in turn
 * uses &amdgpu_gem_prime_res_obj.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(dev, gobj, flags);
	if (!IS_ERR(buf)) {
		buf->file->f_mapping = dev->anon_inode->i_mapping;
		buf->ops = &amdgpu_dmabuf_ops;
	}

	return buf;
}
/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * The main work is done by the &drm_gem_prime_import helper, which in turn
 * uses &amdgpu_gem_prime_import_sg_table.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, dma_buf);
}