/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"

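/*
 * Initialize a context: allocate one flat array of fence slots
 * (amdgpu_sched_jobs entries per ring, AMDGPU_MAX_RINGS rings) and point
 * each ring's fence window into it, then create a scheduler entity on
 * every hardware ring.  The KIQ ring is kernel-internal and is skipped,
 * since user submissions never target it.  The current GPU reset counter
 * is snapshotted so amdgpu_ctx_query() can later report whether a reset
 * happened during the context's lifetime.
 */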
static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct amd_sched_rq *rq;

		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];

		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	for (j = 0; j < i; j++)
		amd_sched_entity_fini(&adev->rings[j]->sched,
				      &ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

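/*
 * Tear down a context: drop every fence reference still held in the
 * per-ring slots, then destroy the scheduler entities and the queue
 * manager.
 */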
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	for (i = 0; i < adev->num_rings; i++)
		amd_sched_entity_fini(&adev->rings[i]->sched,
				      &ctx->rings[i].entity);

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
}

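/*
 * Allocate a new context and publish it in the file-private IDR under
 * the manager lock.  Handles start at 1, so 0 never names a valid
 * context.
 */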
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

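/* kref release callback: frees the context once the last reference is gone */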
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

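/*
 * Report context state to user space.  The reset status is derived by
 * comparing the context's saved reset counter against the device's
 * current one, so each query reports resets that happened since the
 * previous query.
 */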
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

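/*
 * Entry point for the amdgpu context ioctl: dispatches the
 * AMDGPU_CTX_OP_* operations carried in union drm_amdgpu_ctx.
 */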
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

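/*
 * amdgpu_ctx_get()/amdgpu_ctx_put() pair: look up a context by handle
 * and take a reference under the manager lock, then drop that reference
 * again.  Callers must balance every successful get with a put.
 */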
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

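/*
 * Remember a scheduler fence under the next sequence number and return
 * that number.  The per-ring fence storage is a power-of-two ring buffer
 * of amdgpu_sched_jobs slots indexed by seq & (amdgpu_sched_jobs - 1),
 * so before a slot can be reused the fence still occupying it must have
 * signaled; waiting for it here throttles user space to at most
 * amdgpu_sched_jobs outstanding submissions per ring.
 */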
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct dma_fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = seq & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = cring->fences[idx];

	if (other) {
		signed long r;
		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);

	return seq;
}

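/*
 * Look up the fence for a given sequence number.  ~0ull means "the most
 * recently added fence".  Sequence numbers from the future are an error;
 * numbers more than amdgpu_sched_jobs behind have been overwritten in
 * the ring buffer and return NULL, meaning that fence signaled long ago.
 */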
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = cring->sequence - 1;

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

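/*
 * Destroy the per-file context manager.  Any context still in the IDR
 * at this point was never freed by user space; drop its reference and
 * complain if something else is still holding the context alive.
 */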
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}