/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}

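/*
 * A minimal usage sketch (hypothetical caller, not part of this file): a
 * sync object is typically created, filled with fences from a reservation
 * object, consumed and then freed again:
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner);
 *	...
 *	amdgpu_sync_free(&sync);
 */
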
/**
 * amdgpu_sync_same_dev - test if fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - Keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one is
 * later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry. Returns true when an
 * entry was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);
		return true;
	}
	return false;
}

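/*
 * Note: the hash is keyed by the fence context, so the sync object keeps at
 * most one fence per context. Within a single context dma_fence_is_later()
 * is well defined, which is why amdgpu_sync_keep_later() above can safely
 * pick the newer of the two fences.
 */
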
/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object. Returns 0 on success, -ENOMEM when a
 * new hash entry could not be allocated.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	if (amdgpu_sync_add_later(sync, f))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fence
 * @owner: originator of the request, used to decide which fences can be
 *	   skipped
 *
 * Sync to the fences of the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct dma_fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			fence_owner = amdgpu_sync_get_owner(f);
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fence from the same owner as
			 * long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence_owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}

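/*
 * A short summary of the filtering above, since the double negation is easy
 * to misread: a shared fence from the same device is skipped when (a) both
 * owners are defined but exactly one of the two is the VM, or (b) both
 * fences come from the same, defined owner. Everything else, including every
 * fence from a foreign device, is synced to.
 */
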
/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for the test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}

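/*
 * Note on the scheduled fence above: jobs on one ring execute in submission
 * order, so a dependency on the same ring only needs the earlier job to be
 * scheduled, not finished. Returning &s_fence->scheduled instead of the full
 * fence therefore lets the caller unblock as early as possible.
 */
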
/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 *
 * Gets and removes the next fence from the sync object not signaled yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {

		f = e->fence;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}
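
/*
 * Note: the entry's fence reference is handed over to the caller here, so
 * whoever consumes a fence returned by amdgpu_sync_get_fence() is
 * responsible for the matching dma_fence_put().
 */
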
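/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait on
 * @intr: if true the wait is interruptible
 *
 * Waits for every fence in the sync object to signal, dropping the fences on
 * success. Returns 0 or a negative error code from dma_fence_wait().
 */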
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}