]>
Commit | Line | Data |
---|---|---|
d38ceaf9 AD |
1 | /* |
2 | * Copyright 2012 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * based on nouveau_prime.c | |
23 | * | |
24 | * Authors: Alex Deucher | |
25 | */ | |
26 | #include <drm/drmP.h> | |
27 | ||
28 | #include "amdgpu.h" | |
29 | #include <drm/amdgpu_drm.h> | |
30 | #include <linux/dma-buf.h> | |
31 | ||
32 | struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) | |
33 | { | |
34 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | |
35 | int npages = bo->tbo.num_pages; | |
36 | ||
37 | return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); | |
38 | } | |
39 | ||
40 | void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj) | |
41 | { | |
42 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | |
43 | int ret; | |
44 | ||
45 | ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, | |
46 | &bo->dma_buf_vmap); | |
47 | if (ret) | |
48 | return ERR_PTR(ret); | |
49 | ||
50 | return bo->dma_buf_vmap.virtual; | |
51 | } | |
52 | ||
53 | void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) | |
54 | { | |
55 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | |
56 | ||
57 | ttm_bo_kunmap(&bo->dma_buf_vmap); | |
58 | } | |
59 | ||
/**
 * amdgpu_gem_prime_import_sg_table - import a dma-buf sg table as a GEM BO
 * @dev: DRM device doing the import
 * @attach: dma-buf attachment describing the exporter's buffer
 * @sg: scatter/gather table of the imported pages
 *
 * Creates a GTT-domain amdgpu buffer object that wraps the imported
 * pages and shares the exporter's reservation object.
 *
 * Returns the embedded GEM object on success, ERR_PTR on failure.
 */
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	int ret;

	/* Create the BO while holding the exporter's reservation lock so
	 * the new object can adopt that reservation object (passed as
	 * @resv) atomically with its creation. */
	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
	ww_mutex_unlock(&resv->lock);
	if (ret)
		return ERR_PTR(ret);

	/* Imported BOs start life shared with at least the exporter. */
	bo->prime_shared_count = 1;
	return &bo->gem_base;
}
80 | ||
/**
 * amdgpu_gem_prime_pin - pin a shared GEM object into GTT for export
 * @obj: GEM object backed by an amdgpu buffer object
 *
 * Reserves the BO, waits for all outstanding shared fences, then pins
 * the buffer into the GTT domain and bumps prime_shared_count.
 *
 * Returns 0 on success, negative error code on failure.
 *
 * NOTE(review): @ret is declared long to hold the remaining-timeout
 * return of reservation_object_wait_timeout_rcu(), but the function
 * returns int — the final value always comes from amdgpu_bo_reserve()
 * or amdgpu_bo_pin() (both int-ranged), so no truncation in practice.
 */
int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	long ret = 0;

	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * Wait for all shared fences to complete before we switch to future
	 * use of exclusive fence on this prime shared bo.
	 */
	/* NOTE(review): only ret < 0 is treated as failure; a 0 return
	 * (timeout) would fall through as success — presumably unreachable
	 * with MAX_SCHEDULE_TIMEOUT, but worth confirming. */
	ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
						  MAX_SCHEDULE_TIMEOUT);
	if (unlikely(ret < 0)) {
		DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
		amdgpu_bo_unreserve(bo);
		return ret;
	}

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (likely(ret == 0))
		bo->prime_shared_count++;

	amdgpu_bo_unreserve(bo);
	return ret;
}
110 | ||
111 | void amdgpu_gem_prime_unpin(struct drm_gem_object *obj) | |
112 | { | |
113 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | |
114 | int ret = 0; | |
115 | ||
116 | ret = amdgpu_bo_reserve(bo, false); | |
117 | if (unlikely(ret != 0)) | |
118 | return; | |
119 | ||
120 | amdgpu_bo_unpin(bo); | |
8e94a46c MK |
121 | if (bo->prime_shared_count) |
122 | bo->prime_shared_count--; | |
d38ceaf9 AD |
123 | amdgpu_bo_unreserve(bo); |
124 | } | |
125 | ||
126 | struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj) | |
127 | { | |
128 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | |
129 | ||
130 | return bo->tbo.resv; | |
131 | } | |
132 | ||
133 | struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, | |
134 | struct drm_gem_object *gobj, | |
135 | int flags) | |
136 | { | |
137 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); | |
138 | ||
cc325d19 | 139 | if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) |
d38ceaf9 AD |
140 | return ERR_PTR(-EPERM); |
141 | ||
142 | return drm_gem_prime_export(dev, gobj, flags); | |
143 | } |