/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"

static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	/* One for TTM and one for the CS job */
	p->uf_entry.tv.num_shared = 2;

	drm_gem_object_put_unlocked(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}

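/*
 * Handle an AMDGPU_CHUNK_ID_BO_HANDLES chunk: convert the array of handles
 * passed in-line with the submission into a BO list and store it as
 * p->bo_list.
 */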
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
{
	struct drm_amdgpu_bo_list_entry *info = NULL;
	int r;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	kvfree(info);
	return r;
}

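/*
 * First stage of the CS ioctl: copy the chunk array from userspace, acquire
 * the submission context and pre-parse the chunks that influence job
 * allocation (IB count, user fence, in-line BO handles).
 */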
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

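/*
 * Illustrative numbers: with log2_max_MBps = 6 (i.e. 64 MB/s), one second of
 * accumulated time (1,000,000 us) converts to 1,000,000 << 6 = 64,000,000
 * bytes of move budget, and reporting 64,000,000 moved bytes charges the
 * same 1,000,000 us back via bytes_to_us().
 */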
/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;

			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

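/*
 * Validate a single BO against the per-submission move budget: while
 * p->bytes_moved is below the threshold the BO may be moved towards its
 * preferred domains, otherwise only its currently allowed domains are used,
 * and an -ENOMEM from TTM triggers one retry with the allowed domains.
 */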
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.base.resv,
	};
	uint32_t domain;
	int r;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold &&
	    (!bo->tbo.base.dma_buf ||
	    list_empty(&bo->tbo.base.dma_buf->attachments))) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

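/*
 * Validation callback passed to amdgpu_vm_validate_pt_bos(): validates the
 * BO itself and, if it has one, its shadow BO.
 */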
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	r = amdgpu_cs_bo_validate(p, bo);
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}

static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
		    lobj->user_invalidated && lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
		}

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		kvfree(lobj->user_pages);
		lobj->user_pages = NULL;
	}
	return 0;
}

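/*
 * Build the list of BOs referenced by the submission (explicit BO list,
 * in-line BO handles or an empty list), pin the userptr backing pages,
 * reserve all buffers and validate them against the move thresholds.
 */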
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	/* One for TTM and one for the CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 2;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	/* Get userptr backing pages. If pages are updated after being
	 * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
	 * will do amdgpu_ttm_backend_bind() to flush and invalidate new pages.
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		bool userpage_invalidated = false;
		int i;

		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					       sizeof(struct page *),
					       GFP_KERNEL | __GFP_ZERO);
		if (!e->user_pages) {
			DRM_ERROR("calloc failure\n");
			return -ENOMEM;
		}

		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
		if (r) {
			kvfree(e->user_pages);
			e->user_pages = NULL;
			return r;
		}

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
				   &duplicates);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
		goto out;
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r)
		goto error_validate;

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		/* Make sure we use the exclusive slot for shared BOs */
		if (bo->prime_shared_count)
			e->tv.num_shared = 0;
		e->bo_va = amdgpu_vm_bo_find(vm, bo);
	}

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}

	if (!r && p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
out:
	return r;
}

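/*
 * Add the implicit and explicit synchronization dependencies of every
 * reserved BO to the job, honouring per-BO explicit-sync settings.
 */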
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct dma_resv *resv = bo->tbo.base.resv;
		enum amdgpu_sync_mode sync_mode;

		sync_mode = amdgpu_bo_explicit_sync(bo) ?
			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
				     &fpriv->vm);
		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: whether to back off the buffer reservations
 *
 * If error is set then unvalidate the buffers, otherwise just free the
 * memory used by the parsing context.
 */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}

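/*
 * Run the VM updates needed by this submission (freed and moved mappings,
 * page directories) and, for rings that emulate VM handling such as UVD/VCE,
 * parse or patch the IBs with the real GPU addresses first.
 */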
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, j);
				if (r)
					return r;
			} else {
				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
				amdgpu_bo_kunmap(aobj);
				if (r)
					return r;
			}

			j++;
		}
	}

	if (!p->job->vm)
		return amdgpu_cs_sync_rings(p);

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
	if (r)
		return r;

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		bo_va = fpriv->csa_va;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r)
		return r;

	r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update);
	if (r)
		return r;

	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return amdgpu_cs_sync_rings(p);
}

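/*
 * Walk the IB chunks, pick the scheduler entity the job will run on and
 * allocate one amdgpu_ib per chunk. All IBs of a submission must target the
 * same entity, and MM engines do not support user fences.
 */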
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int r, ce_preempt = 0, de_preempt = 0;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct drm_sched_entity *entity;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
		    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
					  chunk_ib->ip_instance, chunk_ib->ring,
					  &entity);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
			parser->job->preamble_status |=
				AMDGPU_PREAMBLE_IB_PRESENT;

		if (parser->entity && parser->entity != entity)
			return -EINVAL;

		/* Return if there is no run queue associated with this entity.
		 * Possibly because of disabled HW IP.
		 */
		if (entity->rq == NULL)
			return -EINVAL;

		parser->entity = entity;

		ring = to_amdgpu_ring(entity->rq->sched);
		r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
				  chunk_ib->ib_bytes : 0,
				  AMDGPU_IB_POOL_DELAYED, ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* MM engine doesn't support user fences */
	ring = to_amdgpu_ring(parser->entity->rq->sched);
	if (parser->job->uf_addr && ring->funcs->no_user_fence)
		return -EINVAL;

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
}

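/*
 * Handle a fence dependency chunk: look up the fence of each referenced
 * prior submission and add it as a dependency of the current job.
 */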
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_amdgpu_cs_chunk_dep *deps;
	unsigned num_deps;
	int i, r;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
		amdgpu_ctx_put(ctx);

		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence;
			struct dma_fence *old = fence;

			s_fence = to_drm_sched_fence(fence);
			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		r = amdgpu_sync_fence(&p->job->sync, fence, true);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

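/*
 * Look up the fence behind a syncobj handle (optionally at a timeline point)
 * and add it as a dependency of the current job.
 */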
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle, u64 point,
						 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
			  handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(&p->job->sync, fence, true);
	dma_fence_put(fence);

	return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i, r;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
							  0, 0);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
						      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i, r;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
							  syncobj_deps[i].handle,
							  syncobj_deps[i].point,
							  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
						       struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			kfree(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
			if (r)
				return r;
			break;
		}
	}

	return 0;
}

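/*
 * Signal the syncobjs requested as submission outputs: attach the job fence
 * directly, or through the pre-allocated chain node for timeline points.
 */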
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}

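/*
 * Final stage of the CS ioctl: turn the parsed state into a scheduler job,
 * recheck the userptr pages under the notifier lock, publish the fence and
 * push the job to its entity.
 */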
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_sched_entity *entity = p->entity;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_job *job;
	uint64_t seq;
	int r;

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
	if (r)
		goto error_unlock;

	/* No memory allocation is allowed while holding the notifier lock.
	 * The lock is held until amdgpu_cs_submit is finished and fence is
	 * added to BOs.
	 */
	mutex_lock(&p->adev->notifier_lock);

	/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
	 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
	}
	if (r) {
		r = -EAGAIN;
		goto error_abort;
	}

	p->fence = dma_fence_get(&job->base.s_fence->finished);

	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
	amdgpu_cs_post_dependencies(p);

	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	drm_sched_entity_push_job(&job->base, entity);

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	mutex_unlock(&p->adev->notifier_lock);

	return 0;

error_abort:
	drm_sched_job_cleanup(&job->base);
	mutex_unlock(&p->adev->notifier_lock);

error_unlock:
	amdgpu_job_free(job);
	return r;
}

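/*
 * Entry point of the CS ioctl: initialize the parser, fill the IBs, resolve
 * dependencies, reserve and validate the buffers, run the VM handling and
 * submit the job.
 */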
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (amdgpu_ras_intr_triggered())
		return -EHWPOISON;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);

	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

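/*
 * Convert the fence described by a drm_amdgpu_fence into a syncobj handle,
 * a syncobj fd or a sync_file fd, depending on info->in.what.
 */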
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		dma_fence_put(fence);
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;

		if (fence->error)
			return fence->error;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}