/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
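
/*
 * Command submission (CS) overview, as implemented below: userspace hands
 * the CS ioctl an array of chunks (IBs, a user fence, BO handles and
 * dependency/syncobj chunks). amdgpu_cs_parser_init() copies the chunks in,
 * amdgpu_cs_ib_fill() sets up the indirect buffers, amdgpu_cs_dependencies()
 * collects the fences to wait for, amdgpu_cs_parser_bos() reserves and
 * validates all buffer objects, amdgpu_cs_vm_handling() updates the page
 * tables, and amdgpu_cs_submit() finally pushes the job to the scheduler.
 */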
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	p->uf_entry.tv.shared = true;
	p->uf_entry.user_pages = NULL;

	drm_gem_object_put_unlocked(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
{
	struct drm_amdgpu_bo_list_entry *info = NULL;
	int r;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	if (info)
		kvfree(info);

	return r;
}
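
/*
 * Note on the chunk ABI handled by amdgpu_cs_parser_init() below:
 * cs->in.chunks points to an array of cs->in.num_chunks user pointers, each
 * referring to a struct drm_amdgpu_cs_chunk that carries a chunk_id, a
 * length in dwords and a pointer to the chunk payload. The payload is
 * copied into p->chunks[i].kdata before it is interpreted.
 */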
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kfree(chunk_array);

	return ret;
}
/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}
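
/*
 * Example of the conversion above: with log2_max_MBps = 6 (64 MB/s), one
 * second of accumulated time (1000000 us) yields 1000000 << 6 = 64000000
 * bytes, i.e. exactly one second worth of the configured throughput.
 * bytes_to_us() is the inverse, rounding down.
 */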
/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}
/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}
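
/*
 * The budgets computed above are consumed by the validation code below:
 * amdgpu_cs_bo_validate() only honors a BO's preferred placement while
 * p->bytes_moved stays under p->bytes_moved_threshold (and, for CPU
 * visible VRAM, while p->bytes_moved_vis stays under
 * p->bytes_moved_vis_threshold); once the budget is spent it falls back
 * to the allowed domains instead.
 */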
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.resv,
		.flags = 0
	};
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}
/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	struct ttm_operation_ctx ctx = { true, false };
	int r;

	if (!p->evictable)
		return false;

	for (;&p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		bool update_bytes_moved_vis;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (bo == validated)
			break;

		/* We can't move pinned BOs here */
		if (bo->pin_count)
			continue;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good we can try to move this BO somewhere else */
		update_bytes_moved_vis =
			!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			amdgpu_bo_in_cpu_visible_vram(bo);
		amdgpu_bo_placement_from_domain(bo, other);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		p->bytes_moved += ctx.bytes_moved;
		if (update_bytes_moved_vis)
			p->bytes_moved_vis += ctx.bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}
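
/*
 * amdgpu_cs_validate() below ties the two helpers together: it retries
 * amdgpu_cs_bo_validate() as long as validation fails with -ENOMEM and
 * amdgpu_cs_try_evict() can still push another BO of the working set out
 * of the contended domain. For example, if the contended domain is VRAM
 * and a candidate allows VRAM|GTT, "allowed & ~domain" leaves GTT as the
 * eviction target.
 */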
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
		    lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;
			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
			binding_userptr = true;
		}

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			kvfree(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}
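
/*
 * amdgpu_cs_parser_bos() below has to deal with userptr BOs whose pages
 * can be invalidated by the MMU notifier at any time before the submission
 * is locked down: it reserves everything, collects the userptr BOs that
 * still need pages on a need_pages list, backs off, fills the page arrays
 * and starts over, giving up after a fixed number of tries.
 */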
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	unsigned tries = 10;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	if (p->bo_list->first_userptr != p->bo_list->num_entries)
		p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	while (1) {
		struct list_head need_pages;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0)) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
			goto error_free_pages;
		}

		INIT_LIST_HEAD(&need_pages);
		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      bo->tbo.ttm->num_pages);
				kvfree(e->user_pages);
				e->user_pages = NULL;
			}

			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(bo);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too many times, just abort */
		if (!--tries) {
			r = -EDEADLK;
			DRM_ERROR("deadlock in %s\n", __func__);
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->tv.bo->ttm;

			e->user_pages = kvmalloc_array(ttm->num_pages,
						       sizeof(struct page *),
						       GFP_KERNEL | __GFP_ZERO);
			if (!e->user_pages) {
				r = -ENOMEM;
				DRM_ERROR("calloc failure in %s\n", __func__);
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
				kvfree(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		/* And try again. */
		list_splice(&need_pages, &p->validated);
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
		goto error_validate;
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo));

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds);
		p->job->gds_size = amdgpu_bo_size(gds);
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws);
		p->job->gws_size = amdgpu_bo_size(gws);
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa);
		p->job->oa_size = amdgpu_bo_size(oa);
	}

	if (!r && p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

error_free_pages:

	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		if (!e->user_pages)
			continue;

		release_pages(e->user_pages, e->tv.bo->ttm->num_pages);
		kvfree(e->user_pages);
	}

	return r;
}
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct reservation_object *resv = bo->tbo.resv;

		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
				     amdgpu_bo_explicit_sync(bo));

		if (r)
			return r;
	}
	return 0;
}
/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicator to backoff the reservation
 *
 * If error is set, then unvalidate the buffers; otherwise just free the
 * memory used by the parsing context.
 */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
		drm_syncobj_put(parser->post_dep_syncobjs[i]);
	kfree(parser->post_dep_syncobjs);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}
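
/*
 * For rings that set parse_cs or patch_cs_in_place (UVD/VCE VM emulation),
 * amdgpu_cs_vm_handling() below kmaps the BO backing each IB: parse_cs gets
 * a copy of the command stream to check, while patch_cs_in_place rewrites
 * the commands directly inside the mapped buffer.
 */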
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, j);
				if (r)
					return r;
			} else {
				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
				amdgpu_bo_kunmap(aobj);
				if (r)
					return r;
			}

			j++;
		}
	}

	if (!p->job->vm)
		return amdgpu_cs_sync_rings(p);

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync,
			      fpriv->prt_va->last_pt_update, false);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		struct dma_fence *f;

		bo_va = fpriv->csa_va;
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct dma_fence *f;

		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
	if (r)
		return r;

	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
	if (r)
		return r;

	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return amdgpu_cs_sync_rings(p);
}
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int r, ce_preempt = 0, de_preempt = 0;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct drm_sched_entity *entity;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
					  chunk_ib->ip_instance, chunk_ib->ring,
					  &entity);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
			parser->job->preamble_status |=
				AMDGPU_PREAMBLE_IB_PRESENT;

		if (parser->entity && parser->entity != entity)
			return -EINVAL;

		parser->entity = entity;

		ring = to_amdgpu_ring(entity->rq->sched);
		r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
				  chunk_ib->ib_bytes : 0, ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	ring = to_amdgpu_ring(parser->entity->rq->sched);
	if (parser->job->uf_addr && (
	    ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    ring->funcs->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
}
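
/*
 * The dependency helpers below recover the element count from the chunk
 * size in dwords, e.g. num_deps = length_dw * 4 / sizeof(struct
 * drm_amdgpu_cs_chunk_dep). Assuming the usual 24 byte layout of that uapi
 * struct, a chunk of 12 dwords (48 bytes) would describe two dependencies.
 */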
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity,
					     deps[i].handle);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			amdgpu_ctx_put(ctx);
			return r;
		} else if (fence) {
			r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
					      true);
			dma_fence_put(fence);
			amdgpu_ctx_put(ctx);
			if (r)
				return r;
		}
	}
	return 0;
}
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle)
{
	int r;
	struct dma_fence *fence;
	r = drm_syncobj_find_fence(p->filp, handle, &fence);
	if (r)
		return r;

	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
	dma_fence_put(fence);

	return r;
}
static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_sem *deps;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
		if (r)
			return r;
	}
	return 0;
}
static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i;
	struct drm_amdgpu_cs_chunk_sem *deps;
	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	p->post_dep_syncobjs = kmalloc_array(num_deps,
					     sizeof(struct drm_syncobj *),
					     GFP_KERNEL);
	p->num_post_dep_syncobjs = 0;

	if (!p->post_dep_syncobjs)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_dep_syncobjs[i])
			return -EINVAL;
		p->num_post_dep_syncobjs++;
	}
	return 0;
}
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				return r;
		}
	}

	return 0;
}
*p
)
1195 for (i
= 0; i
< p
->num_post_dep_syncobjs
; ++i
)
1196 drm_syncobj_replace_fence(p
->post_dep_syncobjs
[i
], p
->fence
);
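
/*
 * amdgpu_cs_submit() below takes the MMU notifier lock before the final
 * userptr check and holds it until the job is pushed and the reservations
 * are fenced; if any userptr BO lost its pages in the meantime the whole
 * ioctl restarts with -ERESTARTSYS rather than submitting stale addresses.
 */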
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_sched_entity *entity = p->entity;
	enum drm_sched_priority priority;
	struct amdgpu_ring *ring;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_job *job;
	uint64_t seq;
	int r;

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, entity, p->filp);
	if (r)
		goto error_unlock;

	/* No memory allocation is allowed while holding the mn lock */
	amdgpu_mn_lock(p->mn);
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
			r = -ERESTARTSYS;
			goto error_abort;
		}
	}

	job->owner = p->filp;
	p->fence = dma_fence_get(&job->base.s_fence->finished);

	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
	amdgpu_cs_post_dependencies(p);

	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	amdgpu_mn_unlock(p->mn);

	return 0;

error_abort:
	dma_fence_put(&job->base.s_fence->finished);
	job->base.s_fence = NULL;
	amdgpu_mn_unlock(p->mn);

error_unlock:
	amdgpu_job_free(job);
	return r;
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	return r;
}
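
/*
 * For the wait ioctls below remember the dma_fence_wait_timeout()
 * convention: it returns 0 on timeout and the remaining timeout (> 0) once
 * the fence signaled, so "wait->out.status = (r == 0)" reports 1 while the
 * submission is still running and 0 once it completed.
 */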
/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}
/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		return -EINVAL;
	}
}
/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		/* pick up a fence error before dropping our reference */
		if (r > 0 && fence->error)
			r = fence->error;

		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}
/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}
/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}
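
/*
 * Presumably the BO found by amdgpu_cs_find_mapping() below backs an IB
 * that the CPU has to kmap and parse, hence it is validated with
 * AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS set and bound to GART, which keeps the
 * whole buffer linearly addressable for the kernel.
 */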
/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}