/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

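/* Set up the user fence from the fence chunk: look up the GEM handle,
 * take a reference on the BO and queue it for validation. The BO must be
 * exactly one page, the 8 byte fence value must fit at the given offset
 * and userptr BOs are rejected.
 */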
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	unsigned long size;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
	p->uf_entry.tv.shared = true;
	p->uf_entry.user_pages = NULL;

	size = amdgpu_bo_size(p->uf_entry.robj);
	if (size != PAGE_SIZE || (data->offset + 8) > size)
		return -EINVAL;

	*offset = data->offset;

	drm_gem_object_unreference_unlocked(gobj);

	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
		amdgpu_bo_unref(&p->uf_entry.robj);
		return -EINVAL;
	}

	return 0;
}

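/**
 * amdgpu_cs_parser_init() - copy in and check the CS chunks from userspace
 * @p: parser structure holding parsing context.
 * @data: pointer to the union drm_amdgpu_cs ioctl argument.
 *
 * Copies the chunk array and each chunk body into kernel memory, counts
 * the IB chunks, resolves the optional user fence chunk and allocates
 * the job for the counted IBs.
 */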
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto put_ctx;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto put_ctx;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(uintptr_t)chunk_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->uf_entry.robj)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);
	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
put_ctx:
	amdgpu_ctx_put(p->ctx);
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}
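
/* For illustration: with log2_max_MBps == 6 (a 64 MB per second budget),
 * one second of accumulated time (1000000 us) converts to 1000000 << 6,
 * i.e. roughly 64 MB of allowed moves; bytes_to_us() is the inverse shift.
 */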

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
{
	s64 time_us, increment_us;
	u64 max_bytes;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
	used_vram = atomic64_read(&adev->vram_usage);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This returns 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	spin_unlock(&adev->mm_stats.lock);
	return max_bytes;
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	spin_unlock(&adev->mm_stats.lock);
}
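
/* For example, a submission that was allowed to move 8 MB but actually
 * moved 16 MB leaves accum_us negative, so us_to_bytes() keeps returning
 * a zero threshold until the accumulated time has repaid the debt.
 */

/* Validate a single BO: use the preferred domains while the move budget
 * lasts, the allowed domains otherwise, and retry with the allowed
 * domains on -ENOMEM. Bytes moved by TTM are charged to p->bytes_moved.
 */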
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u64 initial_bytes_moved;
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold)
		domain = bo->prefered_domains;
	else
		domain = bo->allowed_domains;

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
		initial_bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	int r;

	if (!p->evictable)
		return false;

	for (;&p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = candidate->robj;
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		u64 initial_bytes_moved;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (candidate->robj == validated)
			break;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good we can try to move this BO somewhere else */
		amdgpu_ttm_placement_from_domain(bo, other);
		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
			initial_bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}
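
/* Validation callback for amdgpu_vm_validate_pt_bos(): run the validate
 * and evict loop until the BO fits, then validate its shadow BO as well
 * if one exists.
 */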
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}
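
/* Validate all BOs on a list: reject userptr BOs that belong to another
 * process, copy freshly acquired user pages into the TTM object before
 * it is bound, and validate each entry in turn.
 */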
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = lobj->robj;
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
			size_t size = sizeof(struct page *);

			size *= bo->tbo.ttm->num_pages;
			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
			binding_userptr = true;
		}

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			kvfree(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}
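
/* Gather and reserve all BOs used by the submission: the entries of the
 * BO list handle, the VM page directory and the user fence BO. Userptr
 * BOs whose pages were invalidated in the meantime get fresh page arrays
 * and the reservation is retried, at most ten times.
 */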
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	bool need_mmap_lock = false;
	unsigned i, tries = 10;
	int r;

	INIT_LIST_HEAD(&p->validated);

	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
	if (p->bo_list) {
		need_mmap_lock = p->bo_list->first_userptr !=
			p->bo_list->num_entries;
		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	}

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.robj)
		list_add(&p->uf_entry.tv.head, &p->validated);

	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	while (1) {
		struct list_head need_pages;
		unsigned i;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0)) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
			goto error_free_pages;
		}

		/* Without a BO list we don't have userptr BOs */
		if (!p->bo_list)
			break;

		INIT_LIST_HEAD(&need_pages);
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {

			e = &p->bo_list->array[i];

			if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      e->robj->tbo.ttm->num_pages,
					      false);
				kvfree(e->user_pages);
				e->user_pages = NULL;
			}

			if (e->robj->tbo.ttm->state != tt_bound &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(e->robj);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too many times, just abort */
		if (!--tries) {
			r = -EDEADLK;
			DRM_ERROR("deadlock in %s\n", __func__);
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->robj->tbo.ttm;

			e->user_pages = kvmalloc_array(ttm->num_pages,
						       sizeof(struct page*),
						       GFP_KERNEL | __GFP_ZERO);
			if (!e->user_pages) {
				r = -ENOMEM;
				DRM_ERROR("calloc failure in %s\n", __func__);
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
				kvfree(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		/* And try again. */
		list_splice(&need_pages, &p->validated);
	}

	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
	p->bytes_moved = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
		goto error_validate;
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);

	fpriv->vm.last_eviction_counter =
		atomic64_read(&p->adev->num_evictions);

	if (p->bo_list) {
		struct amdgpu_bo *gds = p->bo_list->gds_obj;
		struct amdgpu_bo *gws = p->bo_list->gws_obj;
		struct amdgpu_bo *oa = p->bo_list->oa_obj;
		struct amdgpu_vm *vm = &fpriv->vm;
		unsigned i;

		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct amdgpu_bo *bo = p->bo_list->array[i].robj;

			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
		}

		if (gds) {
			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
			p->job->gds_size = amdgpu_bo_size(gds);
		}
		if (gws) {
			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
			p->job->gws_size = amdgpu_bo_size(gws);
		}
		if (oa) {
			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
			p->job->oa_size = amdgpu_bo_size(oa);
		}
	}

	if (!r && p->uf_entry.robj) {
		struct amdgpu_bo *uf = p->uf_entry.robj;

		r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r) {
		amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
	}

error_free_pages:

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	if (p->bo_list) {
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			e = &p->bo_list->array[i];

			if (!e->user_pages)
				continue;

			release_pages(e->user_pages,
				      e->robj->tbo.ttm->num_pages,
				      false);
			kvfree(e->user_pages);
		}
	}

	return r;
}
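
/* Make the job wait for all fences already attached to the reservation
 * objects of the validated BOs.
 */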
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);

		if (r)
			return r;
	}
	return 0;
}

/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	unsigned i;

	if (!error) {
		amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    parser->fence);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}
	dma_fence_put(parser->fence);

	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	amdgpu_bo_unref(&parser->uf_entry.robj);
}

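/* Update the page tables of all BOs referenced by the submission and add
 * the resulting page-table update fences to the job's sync object. This
 * covers the page directory, freed and invalidated mappings, the PRT
 * dummy mapping (fpriv->prt_va), the CSA BO under SR-IOV and every BO on
 * the BO list; with amdgpu_vm_debug set all BOs are invalidated again to
 * catch userspace bugs.
 */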
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
{
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int i, r;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update);
	if (r)
		return r;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync,
			      fpriv->prt_va->last_pt_update);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		struct dma_fence *f;
		bo_va = vm->csa_bo_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f);
		if (r)
			return r;
	}

	if (p->bo_list) {
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct dma_fence *f;

			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			bo_va = p->bo_list->array[i].bo_va;
			if (bo_va == NULL)
				continue;

			r = amdgpu_vm_bo_update(adev, bo_va, false);
			if (r)
				return r;

			f = bo_va->last_pt_update;
			r = amdgpu_sync_fence(adev, &p->job->sync, f);
			if (r)
				return r;
		}

	}

	r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);

	if (amdgpu_vm_debug && p->bo_list) {
		/* Invalidate all BOs to test for userspace bugs */
		for (i = 0; i < p->bo_list->num_entries; i++) {
			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo);
		}
	}

	return r;
}

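/* Per-submission VM work: run the ring's CS parser for UVD/VCE VM
 * emulation, record the page-directory address for jobs that run with a
 * VM, update the page tables and finally sync to the validated BOs.
 */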
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring = p->job->ring;
	int i, r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs) {
		for (i = 0; i < p->job->num_ibs; i++) {
			r = amdgpu_ring_parse_cs(ring, p, i);
			if (r)
				return r;
		}
	}

	if (p->job->vm) {
		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);

		r = amdgpu_bo_vm_update_pte(p);
		if (r)
			return r;
	}

	return amdgpu_cs_sync_rings(p);
}

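/* Turn the IB chunks into amdgpu_ib structures on the job: map each
 * chunk onto a ring (all IBs must target the same ring), enforce the
 * SR-IOV rule of at most one preemptible CE and one preemptible DE IB
 * per GFX submission, and copy the IB contents for rings that emulate
 * VM access with a CS parser. UVD and VCE cannot use a user fence.
 */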
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j;
	int r, ce_preempt = 0, de_preempt = 0;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
					 chunk_ib->ip_instance, chunk_ib->ring, &ring);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
			if (!parser->ctx->preamble_presented) {
				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
				parser->ctx->preamble_presented = true;
			}
		}

		if (parser->job->ring && parser->job->ring != ring)
			return -EINVAL;

		parser->job->ring = ring;

		if (ring->funcs->parse_cs) {
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			uint64_t offset;
			uint8_t *kptr;

			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
						   &aobj);
			if (!aobj) {
				DRM_ERROR("IB va_start is invalid\n");
				return -EINVAL;
			}

			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r) {
				return r;
			}

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += chunk_ib->va_start - offset;

			r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
			amdgpu_bo_kunmap(aobj);
		} else {
			r = amdgpu_ib_get(adev, vm, 0, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;
		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	if (parser->job->uf_addr && (
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return 0;
}

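/* Resolve one DEPENDENCIES chunk: every drm_amdgpu_cs_chunk_dep entry
 * names a (context, ring, sequence number) triple; any fence that has
 * not signaled yet is added to the job's sync object.
 */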
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ring *ring;
		struct amdgpu_ctx *ctx;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_queue_mgr_map(p->adev, &ctx->queue_mgr,
					 deps[i].ip_type,
					 deps[i].ip_instance,
					 deps[i].ring, &ring);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, ring,
					     deps[i].handle);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			amdgpu_ctx_put(ctx);
			return r;
		} else if (fence) {
			r = amdgpu_sync_fence(p->adev, &p->job->sync,
					      fence);
			dma_fence_put(fence);
			amdgpu_ctx_put(ctx);
			if (r)
				return r;
		}
	}
	return 0;
}

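/* Walk all chunks of the submission and process the dependency chunks. */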
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
		}
	}

	return 0;
}

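/* Hand the prepared job over to the GPU scheduler: initialize the
 * scheduler job, publish the finished fence as the handle returned in
 * the ioctl, free the parser state and push the job to the context's
 * scheduler entity.
 */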
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_ring *ring = p->job->ring;
	struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
	struct amdgpu_job *job;
	int r;

	job = p->job;
	p->job = NULL;

	r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
	if (r) {
		amdgpu_job_free(job);
		return r;
	}

	job->owner = p->filp;
	job->fence_ctx = entity->fence_context;
	p->fence = dma_fence_get(&job->base.s_fence->finished);
	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
	job->uf_sequence = cs->out.handle;
	amdgpu_job_free_resources(job);
	amdgpu_cs_parser_fini(p, 0, true);

	trace_amdgpu_cs_ioctl(job);
	amd_sched_entity_push_job(&job->base);

	return 0;
}

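/* Top level of the CS ioctl: initialize the parser, reserve and validate
 * the buffers, fill in the IBs, collect the dependencies, do the VM
 * updates and submit the job. On failure the parser teardown backs off
 * the reservations if they were taken.
 */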
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;
	if (amdgpu_kms_vram_lost(adev, fpriv))
		return -ENODEV;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;
	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);
	if (r)
		goto out;

	return 0;
out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	if (amdgpu_kms_vram_lost(adev, fpriv))
		return -ENODEV;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
				 wait->in.ip_type, wait->in.ip_instance,
				 wait->in.ring, &ring);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct amdgpu_ring *ring;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
				 user->ip_instance, user->ring, &ring);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;
	/* set return value 0 to indicate success */
	r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	if (amdgpu_kms_vram_lost(adev, fpriv))
		return -ENODEV;
	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = (void __user *)(uintptr_t)(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va mapping for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo)
{
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i;

	if (!parser->bo_list)
		return NULL;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	for (i = 0; i < parser->bo_list->num_entries; i++) {
		struct amdgpu_bo_list_entry *lobj;

		lobj = &parser->bo_list->array[i];
		if (!lobj->bo_va)
			continue;

		list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
			if (mapping->start > addr ||
			    addr > mapping->last)
				continue;

			*bo = lobj->bo_va->bo;
			return mapping;
		}

		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
			if (mapping->start > addr ||
			    addr > mapping->last)
				continue;

			*bo = lobj->bo_va->bo;
			return mapping;
		}
	}

	return NULL;
}

/**
 * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
 *
 * @parser: command submission parser context
 *
 * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
 */
int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
{
	unsigned i;
	int r;

	if (!parser->bo_list)
		return 0;

	for (i = 0; i < parser->bo_list->num_entries; i++) {
		struct amdgpu_bo *bo = parser->bo_list->array[i].robj;

		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
		if (unlikely(r))
			return r;

		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			continue;

		bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
		if (unlikely(r))
			return r;
	}

	return 0;
}