/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <linux/sync_file.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	unsigned long size;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
	p->uf_entry.tv.shared = true;
	p->uf_entry.user_pages = NULL;

	size = amdgpu_bo_size(p->uf_entry.robj);
	if (size != PAGE_SIZE || (data->offset + 8) > size)
		return -EINVAL;

	*offset = data->offset;

	drm_gem_object_put_unlocked(gobj);

	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
		amdgpu_bo_unref(&p->uf_entry.robj);
		return -EINVAL;
	}

	return 0;
}
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret = 0;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.robj)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);
	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kfree(chunk_array);

	return ret;
}
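
/* Summary of the ioctl argument parsed above (illustrative; see
 * include/uapi/drm/amdgpu_drm.h for the authoritative definitions):
 * cs->in.chunks is a u64 array of cs->in.num_chunks user-space pointers, each
 * pointing at a struct drm_amdgpu_cs_chunk with chunk_id, length_dw and a
 * chunk_data user pointer; the length_dw 32-bit words behind chunk_data are
 * copied into p->chunks[i].kdata for later interpretation per chunk_id.
 */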
/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}
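
/* Worked example (illustrative): with log2_max_MBps == 6 the peak rate is
 * 2^6 = 64 MB/s, so an allowance of 1000 accumulated microseconds converts
 * to 1000 << 6 = 64000 bytes, and reporting 64000 moved bytes later charges
 * those 1000 us back against the budget via bytes_to_us().
 */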
/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
		u64 total_vis_vram = adev->mc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}
/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}
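
/* Note: the helpers above form the buffer-move throttling loop used by the
 * CS path: amdgpu_cs_parser_bos() asks amdgpu_cs_get_threshold_for_moves()
 * for a byte budget, amdgpu_cs_bo_validate() consumes it while validating
 * BOs, and amdgpu_cs_report_moved_bytes() charges the bytes actually moved
 * back against the accumulated time, possibly leaving the driver "in debt".
 */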
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u64 initial_bytes_moved, bytes_moved;
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold) {
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	bytes_moved = atomic64_read(&adev->num_bytes_moved) -
		      initial_bytes_moved;
	p->bytes_moved += bytes_moved;
	if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
		p->bytes_moved_vis += bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}
/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	int r;

	if (!p->evictable)
		return false;

	for (;&p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = candidate->robj;
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		u64 initial_bytes_moved, bytes_moved;
		bool update_bytes_moved_vis;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (candidate->robj == validated)
			break;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good we can try to move this BO somewhere else */
		amdgpu_ttm_placement_from_domain(bo, other);
		update_bytes_moved_vis =
			adev->mc.visible_vram_size < adev->mc.real_vram_size &&
			bo->tbo.mem.mem_type == TTM_PL_VRAM &&
			bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		bytes_moved = atomic64_read(&adev->num_bytes_moved) -
			      initial_bytes_moved;
		p->bytes_moved += bytes_moved;
		if (update_bytes_moved_vis)
			p->bytes_moved_vis += bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}
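
/* amdgpu_cs_validate() is also used as the callback for
 * amdgpu_vm_validate_pt_bos() further down: validation is retried as long as
 * it fails with -ENOMEM and amdgpu_cs_try_evict() can still push another BO
 * out of the domains we need space in.
 */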
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = lobj->robj;
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
		    lobj->user_pages) {
			amdgpu_ttm_placement_from_domain(bo,
							 AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
					    false);
			if (r)
				return r;
			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
			binding_userptr = true;
		}

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			kvfree(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	unsigned i, tries = 10;
	int r;

	INIT_LIST_HEAD(&p->validated);

	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
	if (p->bo_list) {
		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
		if (p->bo_list->first_userptr != p->bo_list->num_entries)
			p->mn = amdgpu_mn_get(p->adev);
	}

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.robj)
		list_add(&p->uf_entry.tv.head, &p->validated);

	while (1) {
		struct list_head need_pages;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0)) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
			goto error_free_pages;
		}

		/* Without a BO list we don't have userptr BOs */
		if (!p->bo_list)
			break;

		INIT_LIST_HEAD(&need_pages);
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			struct amdgpu_bo *bo;

			e = &p->bo_list->array[i];
			bo = e->robj;

			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      bo->tbo.ttm->num_pages);
				kvfree(e->user_pages);
				e->user_pages = NULL;
			}

			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(e->robj);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too many times, just abort */
		if (!--tries) {
			r = -EDEADLK;
			DRM_ERROR("deadlock in %s\n", __func__);
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->robj->tbo.ttm;

			e->user_pages = kvmalloc_array(ttm->num_pages,
						       sizeof(struct page *),
						       GFP_KERNEL | __GFP_ZERO);
			if (!e->user_pages) {
				r = -ENOMEM;
				DRM_ERROR("calloc failure in %s\n", __func__);
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
				kvfree(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		list_splice(&need_pages, &p->validated);
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
		goto error_validate;
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);
	if (p->bo_list) {
		struct amdgpu_bo *gds = p->bo_list->gds_obj;
		struct amdgpu_bo *gws = p->bo_list->gws_obj;
		struct amdgpu_bo *oa = p->bo_list->oa_obj;
		struct amdgpu_vm *vm = &fpriv->vm;

		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct amdgpu_bo *bo = p->bo_list->array[i].robj;

			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
		}

		if (gds) {
			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
			p->job->gds_size = amdgpu_bo_size(gds);
		}
		if (gws) {
			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
			p->job->gws_size = amdgpu_bo_size(gws);
		}
		if (oa) {
			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
			p->job->oa_size = amdgpu_bo_size(oa);
		}
	}

	if (!r && p->uf_entry.robj) {
		struct amdgpu_bo *uf = p->uf_entry.robj;

		r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

error_free_pages:

	if (p->bo_list) {
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			e = &p->bo_list->array[i];

			if (!e->user_pages)
				continue;

			release_pages(e->user_pages,
				      e->robj->tbo.ttm->num_pages);
			kvfree(e->user_pages);
		}
	}

	return r;
}
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
				     amdgpu_bo_explicit_sync(e->robj));

		if (r)
			return r;
	}
	return 0;
}
/**
 * cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: whether to back off the buffer reservations
 *
 * If error is set, then unvalidate the buffers; otherwise just free the
 * memory used by the parsing context.
 */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
		drm_syncobj_put(parser->post_dep_syncobjs[i]);
	kfree(parser->post_dep_syncobjs);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	amdgpu_bo_unref(&parser->uf_entry.robj);
}
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
{
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int i, r;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync,
			      fpriv->prt_va->last_pt_update);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		struct dma_fence *f;

		bo_va = fpriv->csa_va;
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f);
		if (r)
			return r;
	}

	if (p->bo_list) {
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct dma_fence *f;

			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			bo_va = p->bo_list->array[i].bo_va;
			if (bo_va == NULL)
				continue;

			r = amdgpu_vm_bo_update(adev, bo_va, false);
			if (r)
				return r;

			f = bo_va->last_pt_update;
			r = amdgpu_sync_fence(adev, &p->job->sync, f);
			if (r)
				return r;
		}
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
	if (r)
		return r;

	if (amdgpu_vm_debug && p->bo_list) {
		/* Invalidate all BOs to test for userspace bugs */
		for (i = 0; i < p->bo_list->num_entries; i++) {
			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return r;
}
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring = p->job->ring;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (p->job->ring->funcs->parse_cs) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			struct amdgpu_ib *ib;
			uint64_t offset;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			r = amdgpu_cs_find_mapping(p, chunk_ib->va_start,
						   &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += chunk_ib->va_start - offset;

			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
			amdgpu_bo_kunmap(aobj);

			r = amdgpu_ring_parse_cs(ring, p, j);
			if (r)
				return r;

			j++;
		}
	}

	if (p->job->vm) {
		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);

		r = amdgpu_bo_vm_update_pte(p);
		if (r)
			return r;
	}

	return amdgpu_cs_sync_rings(p);
}
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j;
	int r, ce_preempt = 0, de_preempt = 0;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
					 chunk_ib->ip_instance, chunk_ib->ring, &ring);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
			if (!parser->ctx->preamble_presented) {
				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
				parser->ctx->preamble_presented = true;
			}
		}

		if (parser->job->ring && parser->job->ring != ring)
			return -EINVAL;

		parser->job->ring = ring;

		r = amdgpu_ib_get(adev, vm,
				  ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
				  ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	if (parser->job->uf_addr && (
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
}
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ring *ring;
		struct amdgpu_ctx *ctx;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_queue_mgr_map(p->adev, &ctx->queue_mgr,
					 deps[i].ip_type,
					 deps[i].ip_instance,
					 deps[i].ring, &ring);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, ring,
					     deps[i].handle);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			amdgpu_ctx_put(ctx);
			return r;
		} else if (fence) {
			r = amdgpu_sync_fence(p->adev, &p->job->sync,
					      fence);
			dma_fence_put(fence);
			amdgpu_ctx_put(ctx);
			if (r)
				return r;
		}
	}

	return 0;
}
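
/* In the dependency chunks handled here and in the syncobj helpers below,
 * length_dw counts 32-bit words, so length_dw * 4 is the chunk size in bytes
 * and dividing by the per-entry struct size yields the number of entries.
 */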
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle)
{
	int r;
	struct dma_fence *fence;
	r = drm_syncobj_find_fence(p->filp, handle, &fence);
	if (r)
		return r;

	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence);
	dma_fence_put(fence);

	return r;
}
static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_sem *deps;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
		if (r)
			return r;
	}
	return 0;
}
static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i;
	struct drm_amdgpu_cs_chunk_sem *deps;
	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	p->post_dep_syncobjs = kmalloc_array(num_deps,
					     sizeof(struct drm_syncobj *),
					     GFP_KERNEL);
	p->num_post_dep_syncobjs = 0;

	if (!p->post_dep_syncobjs)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_dep_syncobjs[i])
			return -EINVAL;
		p->num_post_dep_syncobjs++;
	}
	return 0;
}
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				return r;
		}
	}

	return 0;
}
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_ring *ring = p->job->ring;
	struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
	struct amdgpu_job *job;
	unsigned i;
	uint64_t seq;
	int r;

	amdgpu_mn_lock(p->mn);
	if (p->bo_list) {
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			struct amdgpu_bo *bo = p->bo_list->array[i].robj;

			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
				amdgpu_mn_unlock(p->mn);
				return -ERESTARTSYS;
			}
		}
	}

	job = p->job;
	p->job = NULL;

	r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
	if (r) {
		amdgpu_job_free(job);
		amdgpu_mn_unlock(p->mn);
		return r;
	}

	job->owner = p->filp;
	job->fence_ctx = entity->fence_context;
	p->fence = dma_fence_get(&job->base.s_fence->finished);

	r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
	if (r) {
		dma_fence_put(p->fence);
		dma_fence_put(&job->base.s_fence->finished);
		amdgpu_job_free(job);
		amdgpu_mn_unlock(p->mn);
		return r;
	}

	amdgpu_cs_post_dependencies(p);

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);
	amdgpu_ring_priority_get(job->ring,
				 amd_sched_get_job_priority(&job->base));

	trace_amdgpu_cs_ioctl(job);
	amd_sched_entity_push_job(&job->base);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	amdgpu_mn_unlock(p->mn);

	return 0;
}
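
/* The amdgpu_mn lock taken in amdgpu_cs_submit() keeps the userptr BOs from
 * being invalidated between the needs_pages check and the point where the
 * scheduler fence is attached to the reserved buffers; if the pages are
 * already stale the submit backs out with -ERESTARTSYS so the ioctl is
 * restarted and the page arrays are re-acquired.
 */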
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	return r;
}
/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
				 wait->in.ip_type, wait->in.ip_instance,
				 wait->in.ring, &ring);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}
/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct amdgpu_ring *ring;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
				 user->ip_instance, user->ring, &ring);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		return -EINVAL;
	}
}
/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;

		if (fence->error)
			return fence->error;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}
/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;
	/* set return value 0 to indicate success */
	r = array[first]->error;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}
/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}
/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
				    false);
		if (r)
			return r;
	}

	return amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
}