/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"

static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);

	/* One fence slot per scheduler job, for every possible ring. */
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence *), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct amd_sched_rq *rq;

		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];

		/* the kernel interface queue is not exposed to user contexts */
		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	/* tear down the entities created so far before bailing out */
	for (j = 0; j < i; j++)
		amd_sched_entity_fini(&adev->rings[j]->sched,
				      &ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}
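/*
 * Layout note (illustrative, derived from the code above): ctx->fences is a
 * single flat allocation carved into AMDGPU_MAX_RINGS windows of
 * amdgpu_sched_jobs entries each, so slot j of ring i lives at
 * ctx->fences[amdgpu_sched_jobs * i + j].  This is the same layout that
 * amdgpu_ctx_fini() walks when dropping the cached fence references.
 */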
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	/* drop every fence still cached in the per-ring slots */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	for (i = 0; i < adev->num_rings; i++)
		amd_sched_entity_fini(&adev->rings[i]->sched,
				      &ctx->rings[i].entity);

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
}
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}
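/*
 * Illustrative only: a minimal userspace sketch of how this ioctl is
 * typically driven through libdrm, assuming an already opened render-node
 * file descriptor "fd" and a local "ctx_id"; DRM_AMDGPU_CTX and the
 * drm_amdgpu_ctx union come from amdgpu_drm.h.  Error handling is trimmed.
 *
 *	union drm_amdgpu_ctx args = {};
 *	int ret;
 *
 *	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
 *	ret = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
 *	if (ret == 0)
 *		ctx_id = args.out.alloc.ctx_id;
 *
 *	args.in.op = AMDGPU_CTX_OP_FREE_CTX;
 *	args.in.ctx_id = ctx_id;
 *	ret = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
 */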
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct dma_fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct dma_fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other) {
		signed long r;

		/* wait for the fence currently occupying this slot */
		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);

	return seq;
}
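/*
 * The fence slots behave as a per-ring circular buffer: a sequence number
 * maps to a slot via "seq & (amdgpu_sched_jobs - 1)", which assumes
 * amdgpu_sched_jobs is a power of two (the module parameter is validated
 * elsewhere in the driver).  For example, with amdgpu_sched_jobs == 32,
 * sequence numbers 1, 33 and 65 all land in slot 1, so publishing a new
 * fence first waits for and then drops the fence previously held there.
 */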
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = ctx->rings[ring->idx].sequence - 1;

	if (seq >= cring->sequence) {
		/* the requested submission has not been queued yet */
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		/* slot already recycled, the fence has long since signalled */
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}
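/*
 * A rough sketch of how callers pair these helpers (variable names here are
 * illustrative, not taken from the command-submission code):
 *
 *	seq = amdgpu_ctx_add_fence(ctx, ring, fence);	// publish job fence
 *	...
 *	other = amdgpu_ctx_get_fence(ctx, ring, seq);	// look it up later
 *	if (!IS_ERR_OR_NULL(other)) {
 *		r = dma_fence_wait(other, true);
 *		dma_fence_put(other);
 *	}
 *	amdgpu_ctx_put(ctx);				// drop the reference
 *							// from amdgpu_ctx_get()
 */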
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}