/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
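/* Bit 63 is presumably chosen because it sits well above the documented
 * AMDGPU_GEM_CREATE_* bits, so stashing this driver-private flag in
 * bo->flags cannot collide with regular GEM creation flags.
 */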
/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/* Impose limit on how much memory KFD can use */
uint64_t max_system_mem_limit;
uint64_t max_ttm_mem_limit;
int64_t system_mem_used;
spinlock_t mem_limit_lock;
/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {

static const char * const domain_bit_to_string[] = {

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
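/* ffs(domain) - 1 maps a single-bit domain mask (CPU, GTT, VRAM, ...) to its
 * index in domain_bit_to_string[] above.
 */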
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
        return (struct amdgpu_device *)kgd;
}
static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,

        struct kfd_bo_va_list *entry;

        list_for_each_entry(entry, &mem->bo_va_list, bo_list)
                if (entry->bo_va->base.vm == avm)
/* Set memory usage limits. Currently, the limits are
 * System (TTM + userptr) memory - 15/16th System RAM
 * TTM memory - 3/8th System RAM
 */
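/* Illustration (not from the code): with 16 GiB of low system RAM this
 * works out to a 15 GiB system memory limit and a 6 GiB TTM memory limit.
 */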
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)

        mem = si.totalram - si.totalhigh;

        spin_lock_init(&kfd_mem_limit.mem_limit_lock);
        kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
        kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
        pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
                 (kfd_mem_limit.max_system_mem_limit >> 20),
                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
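/* For example, 64 GiB of managed memory comes out to 64 GiB >> 14 = 4 MiB
 * reserved for page tables.
 */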
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
                uint64_t size, u32 domain, bool sg)

        uint64_t reserved_for_pt =
                ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
        size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                system_mem_needed = acc_size + size;
                ttm_mem_needed = acc_size + size;
        } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
                system_mem_needed = acc_size + size;
                ttm_mem_needed = acc_size;

                system_mem_needed = acc_size;
                ttm_mem_needed = acc_size;
                if (domain == AMDGPU_GEM_DOMAIN_VRAM)

        spin_lock(&kfd_mem_limit.mem_limit_lock);

        if ((kfd_mem_limit.system_mem_used + system_mem_needed >
             kfd_mem_limit.max_system_mem_limit) ||
            (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
             kfd_mem_limit.max_ttm_mem_limit) ||
            (adev->kfd.vram_used + vram_needed >
             adev->gmc.real_vram_size - reserved_for_pt)) {

                kfd_mem_limit.system_mem_used += system_mem_needed;
                kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
                adev->kfd.vram_used += vram_needed;

        spin_unlock(&kfd_mem_limit.mem_limit_lock);
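/* A reservation that would exceed any of the three limits fails (the error
 * path is elided above); callers such as amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
 * undo a successful reservation with unreserve_mem_limit() when a later step
 * fails.
 */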
static void unreserve_mem_limit(struct amdgpu_device *adev,
                uint64_t size, u32 domain, bool sg)

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        spin_lock(&kfd_mem_limit.mem_limit_lock);
        if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                kfd_mem_limit.system_mem_used -= (acc_size + size);
                kfd_mem_limit.ttm_mem_used -= (acc_size + size);
        } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
                kfd_mem_limit.system_mem_used -= (acc_size + size);
                kfd_mem_limit.ttm_mem_used -= acc_size;

                kfd_mem_limit.system_mem_used -= acc_size;
                kfd_mem_limit.ttm_mem_used -= acc_size;
                if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                        adev->kfd.vram_used -= size;
                        WARN_ONCE(adev->kfd.vram_used < 0,
                                  "kfd VRAM memory accounting unbalanced");

        WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
                  "kfd system memory accounting unbalanced");
        WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
                  "kfd TTM memory accounting unbalanced");

        spin_unlock(&kfd_mem_limit.mem_limit_lock);
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)

        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        u32 domain = bo->preferred_domains;
        bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

        if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
                domain = AMDGPU_GEM_DOMAIN_CPU;

        unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 * reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
                                        struct amdgpu_amdkfd_fence *ef)

        struct dma_resv *resv = bo->tbo.base.resv;
        struct dma_resv_list *old, *new;
        unsigned int i, j, k;

        old = dma_resv_get_list(resv);

        new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
        /* Go through all the shared fences in the reservation object and sort
         * the interesting ones to the end of the list.
         */
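        /* Partition sketch: fences to keep are packed toward the front of the
         * new list with index k, while fences whose context matches the
         * eviction fence are moved to the back with index j (pre-decremented
         * from shared_count).
         */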
        for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {

                f = rcu_dereference_protected(old->shared[i],
                                              dma_resv_held(resv));

                if (f->context == ef->base.context)
                        RCU_INIT_POINTER(new->shared[--j], f);

                        RCU_INIT_POINTER(new->shared[k++], f);

        new->shared_max = old->shared_max;
        new->shared_count = k;

        /* Install the new fence list, seqcount provides the barriers */
        write_seqcount_begin(&resv->seq);
        RCU_INIT_POINTER(resv->fence, new);
        write_seqcount_end(&resv->seq);

        /* Drop the references to the removed fences or move them to ef_list */
        for (i = j, k = 0; i < old->shared_count; ++i) {

                f = rcu_dereference_protected(new->shared[i],
                                              dma_resv_held(resv));
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)

        struct amdgpu_bo *root = bo;
        struct amdgpu_vm_bo_base *vm_bo;
        struct amdgpu_vm *vm;
        struct amdkfd_process_info *info;
        struct amdgpu_amdkfd_fence *ef;

        /* we can always get vm_bo from root PD bo. */

        info = vm->process_info;
        if (!info || !info->eviction_fence)

        ef = container_of(dma_fence_get(&info->eviction_fence->base),
                          struct amdgpu_amdkfd_fence, base);

        BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
        ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
        dma_resv_unlock(bo->tbo.base.resv);

        dma_fence_put(&ef->base);
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,

        struct ttm_operation_ctx ctx = { false, false };
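        /* The two initializers above correspond to ttm_operation_ctx's
         * interruptible and no_wait_gpu fields, i.e. an uninterruptible
         * validation that may wait for the GPU.
         */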
        if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
                 "Called with userptr BO"))

        amdgpu_bo_placement_from_domain(bo, domain);

        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

        amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)

        struct amdgpu_vm_parser *p = param;

        return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)

        struct amdgpu_bo *pd = vm->root.base.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
        struct amdgpu_vm_parser param;

        param.domain = AMDGPU_GEM_DOMAIN_VRAM;

        ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,

                pr_err("failed to validate PT BOs\n");

        ret = amdgpu_amdkfd_validate(&param, pd);

                pr_err("failed to validate PD\n");

        vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

        if (vm->use_cpu_for_update) {
                ret = amdgpu_bo_kmap(pd, NULL);

                        pr_err("failed to kmap PD, ret=%d\n", ret);
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)

        struct amdgpu_bo *pd = vm->root.base.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

        ret = amdgpu_vm_update_pdes(adev, vm, false);

        return amdgpu_sync_fence(sync, vm->last_update, false);
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)

        struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
        bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
        uint32_t mapping_flags;

        mapping_flags = AMDGPU_VM_PAGE_READABLE;
        if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
                mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
        if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
                mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

        switch (adev->asic_type) {

                if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {

                        mapping_flags |= coherent ?
                                AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;

                        mapping_flags |= AMDGPU_VM_MTYPE_UC;

                        mapping_flags |= coherent ?
                                AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;

                mapping_flags |= coherent ?
                        AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;

        return amdgpu_gem_va_map_flags(adev, mapping_flags);
/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
                struct amdgpu_vm *vm, bool is_aql,
                struct kfd_bo_va_list **p_bo_va_entry)

        struct kfd_bo_va_list *bo_va_entry;
        struct amdgpu_bo *bo = mem->bo;
        uint64_t va = mem->va;
        struct list_head *list_bo_va = &mem->bo_va_list;
        unsigned long bo_size = bo->tbo.mem.size;

                pr_err("Invalid VA when adding BO to VM\n");

        bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);

        pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,

        /* Add BO to VM internal data structures */
        bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
        if (!bo_va_entry->bo_va) {

                pr_err("Failed to add BO object to VM. ret == %d\n",

        bo_va_entry->va = va;
        bo_va_entry->pte_flags = get_pte_flags(adev, mem);
        bo_va_entry->kgd_dev = (void *)adev;
        list_add(&bo_va_entry->bo_list, list_bo_va);

        *p_bo_va_entry = bo_va_entry;

        /* Allocate and validate page tables if needed */
        ret = vm_validate_pt_pd_bos(vm);

                pr_err("validate_pt_pd_bos() failed\n");

        amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
        list_del(&bo_va_entry->bo_list);
static void remove_bo_from_vm(struct amdgpu_device *adev,
                struct kfd_bo_va_list *entry, unsigned long size)

        pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",

                 entry->va + size, entry);
        amdgpu_vm_bo_rmv(adev, entry->bo_va);
        list_del(&entry->bo_list);
static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
                struct amdkfd_process_info *process_info,

        struct ttm_validate_buffer *entry = &mem->validate_list;
        struct amdgpu_bo *bo = mem->bo;

        INIT_LIST_HEAD(&entry->head);
        entry->num_shared = 1;
        entry->bo = &bo->tbo;
        mutex_lock(&process_info->lock);

                list_add_tail(&entry->head, &process_info->userptr_valid_list);

                list_add_tail(&entry->head, &process_info->kfd_bo_list);
        mutex_unlock(&process_info->lock);
static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
                struct amdkfd_process_info *process_info)

        struct ttm_validate_buffer *bo_list_entry;

        bo_list_entry = &mem->validate_list;
        mutex_lock(&process_info->lock);
        list_del(&bo_list_entry->head);
        mutex_unlock(&process_info->lock);
/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)

        struct amdkfd_process_info *process_info = mem->process_info;
        struct amdgpu_bo *bo = mem->bo;
        struct ttm_operation_ctx ctx = { true, false };

        mutex_lock(&process_info->lock);

        ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);

                pr_err("%s: Failed to set userptr: %d\n", __func__, ret);

        ret = amdgpu_mn_register(bo, user_addr);

                pr_err("%s: Failed to register MMU notifier: %d\n",

        ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);

                pr_err("%s: Failed to get user pages: %d\n", __func__, ret);

        ret = amdgpu_bo_reserve(bo, true);

                pr_err("%s: Failed to reserve BO\n", __func__);

        amdgpu_bo_placement_from_domain(bo, mem->domain);
        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

                pr_err("%s: failed to validate BO\n", __func__);
        amdgpu_bo_unreserve(bo);

        amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

        amdgpu_mn_unregister(bo);

        mutex_unlock(&process_info->lock);
/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
        struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
        unsigned int n_vms;                 /* Number of VMs reserved */
        struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
        struct ww_acquire_ctx ticket;       /* Reservation ticket */
        struct list_head list, duplicates;  /* BO lists */
        struct amdgpu_sync *sync;           /* Pointer to sync object */
        bool reserved;                      /* Whether BOs are reserved */
};
enum bo_vm_match {
        BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
        BO_VM_MAPPED,           /* Match VMs where a BO is mapped */
        BO_VM_ALL,              /* Match all VMs a BO was added to */
};
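/* Typical flow (sketch): reserve_bo_and_vm() or reserve_bo_and_cond_vms()
 * fills a bo_vm_reservation_context, the caller validates/maps under the
 * reservation, and unreserve_bo_and_vms() optionally waits on ctx->sync
 * before backing off the reservation.
 */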
/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
                              struct amdgpu_vm *vm,
                              struct bo_vm_reservation_context *ctx)

        struct amdgpu_bo *bo = mem->bo;

        ctx->reserved = false;

        ctx->sync = &mem->sync;

        INIT_LIST_HEAD(&ctx->list);
        INIT_LIST_HEAD(&ctx->duplicates);

        ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);

        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
        ctx->kfd_bo.tv.num_shared = 1;
        list_add(&ctx->kfd_bo.tv.head, &ctx->list);

        amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
                                     false, &ctx->duplicates);

                pr_err("Failed to reserve buffers in ttm.\n");

        ctx->reserved = true;
/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 * are used. Otherwise, only the single given VM is used.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
                              struct amdgpu_vm *vm, enum bo_vm_match map_type,
                              struct bo_vm_reservation_context *ctx)

        struct amdgpu_bo *bo = mem->bo;
        struct kfd_bo_va_list *entry;

        ctx->reserved = false;

        ctx->sync = &mem->sync;

        INIT_LIST_HEAD(&ctx->list);
        INIT_LIST_HEAD(&ctx->duplicates);

        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if ((vm && vm != entry->bo_va->base.vm) ||
                        (entry->is_mapped != map_type
                        && map_type != BO_VM_ALL))

        if (ctx->n_vms != 0) {
                ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),

        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
        ctx->kfd_bo.tv.num_shared = 1;
        list_add(&ctx->kfd_bo.tv.head, &ctx->list);

        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if ((vm && vm != entry->bo_va->base.vm) ||
                        (entry->is_mapped != map_type
                        && map_type != BO_VM_ALL))

                amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,

        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
                                     false, &ctx->duplicates);

                pr_err("Failed to reserve buffers in ttm.\n");

        ctx->reserved = true;
/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
                                 bool wait, bool intr)

                ret = amdgpu_sync_wait(ctx->sync, intr);

        ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);

        ctx->reserved = false;
static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
                                struct kfd_bo_va_list *entry,
                                struct amdgpu_sync *sync)

        struct amdgpu_bo_va *bo_va = entry->bo_va;
        struct amdgpu_vm *vm = bo_va->base.vm;

        amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

        amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

        amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
static int update_gpuvm_pte(struct amdgpu_device *adev,
                struct kfd_bo_va_list *entry,
                struct amdgpu_sync *sync)

        struct amdgpu_bo_va *bo_va = entry->bo_va;

        /* Update the page tables */
        ret = amdgpu_vm_bo_update(adev, bo_va, false);

                pr_err("amdgpu_vm_bo_update failed\n");

        return amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
                struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,

        /* Set virtual address for the allocation */
        ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
                               amdgpu_bo_size(entry->bo_va->base.bo),

                pr_err("Failed to map VA 0x%llx in vm. ret %d\n",

        ret = update_gpuvm_pte(adev, entry, sync);

                pr_err("update_gpuvm_pte() failed\n");
                goto update_gpuvm_pte_failed;

update_gpuvm_pte_failed:
        unmap_bo_from_gpuvm(adev, entry, sync);
static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)

        struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

        if (sg_alloc_table(sg, 1, GFP_KERNEL)) {

        sg->sgl->dma_address = addr;
        sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
        sg->sgl->dma_length = size;
#endif
static int process_validate_vms(struct amdkfd_process_info *process_info)

        struct amdgpu_vm *peer_vm;

        list_for_each_entry(peer_vm, &process_info->vm_list_head,

                ret = vm_validate_pt_pd_bos(peer_vm);
static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
                                 struct amdgpu_sync *sync)

        struct amdgpu_vm *peer_vm;

        list_for_each_entry(peer_vm, &process_info->vm_list_head,

                struct amdgpu_bo *pd = peer_vm->root.base.bo;

                ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
                                       AMDGPU_SYNC_NE_OWNER,
                                       AMDGPU_FENCE_OWNER_KFD);
static int process_update_pds(struct amdkfd_process_info *process_info,
                              struct amdgpu_sync *sync)

        struct amdgpu_vm *peer_vm;

        list_for_each_entry(peer_vm, &process_info->vm_list_head,

                ret = vm_update_pds(peer_vm, sync);
static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
                       struct dma_fence **ef)

        struct amdkfd_process_info *info = NULL;

        if (!*process_info) {
                info = kzalloc(sizeof(*info), GFP_KERNEL);

                mutex_init(&info->lock);
                INIT_LIST_HEAD(&info->vm_list_head);
                INIT_LIST_HEAD(&info->kfd_bo_list);
                INIT_LIST_HEAD(&info->userptr_valid_list);
                INIT_LIST_HEAD(&info->userptr_inval_list);

                info->eviction_fence =
                        amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),

                if (!info->eviction_fence) {
                        pr_err("Failed to create eviction fence\n");

                        goto create_evict_fence_fail;

                info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
                atomic_set(&info->evicted_bos, 0);
                INIT_DELAYED_WORK(&info->restore_userptr_work,
                                  amdgpu_amdkfd_restore_userptr_worker);

                *process_info = info;
                *ef = dma_fence_get(&info->eviction_fence->base);

        vm->process_info = *process_info;

        /* Validate page directory and attach eviction fence */
        ret = amdgpu_bo_reserve(vm->root.base.bo, true);

                goto reserve_pd_fail;
        ret = vm_validate_pt_pd_bos(vm);

                pr_err("validate_pt_pd_bos() failed\n");
                goto validate_pd_fail;

        ret = amdgpu_bo_sync_wait(vm->root.base.bo,
                                  AMDGPU_FENCE_OWNER_KFD, false);

        ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);

                goto reserve_shared_fail;
        amdgpu_bo_fence(vm->root.base.bo,
                        &vm->process_info->eviction_fence->base, true);
        amdgpu_bo_unreserve(vm->root.base.bo);

        /* Update process info */
        mutex_lock(&vm->process_info->lock);
        list_add_tail(&vm->vm_list_node,
                        &(vm->process_info->vm_list_head));
        vm->process_info->n_vms++;
        mutex_unlock(&vm->process_info->lock);

        amdgpu_bo_unreserve(vm->root.base.bo);

        vm->process_info = NULL;

        /* Two fence references: one in info and one in *ef */
        dma_fence_put(&info->eviction_fence->base);

        *process_info = NULL;

create_evict_fence_fail:
        mutex_destroy(&info->lock);
int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
                                          void **vm, void **process_info,
                                          struct dma_fence **ef)

        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *new_vm;

        new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);

        /* Initialize AMDGPU part of the VM */
        ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);

                pr_err("Failed init vm ret %d\n", ret);
                goto amdgpu_vm_init_fail;

        /* Initialize KFD part of the VM and process info */
        ret = init_kfd_vm(new_vm, process_info, ef);

                goto init_kfd_vm_fail;

        *vm = (void *) new_vm;

        amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
                                           struct file *filp, unsigned int pasid,
                                           void **vm, void **process_info,
                                           struct dma_fence **ef)

        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct drm_file *drm_priv = filp->private_data;
        struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
        struct amdgpu_vm *avm = &drv_priv->vm;

        /* Already a compute VM? */
        if (avm->process_info)

        /* Convert VM into a compute VM */
        ret = amdgpu_vm_make_compute(adev, avm, pasid);

        /* Initialize KFD part of the VM and process info */
        ret = init_kfd_vm(avm, process_info, ef);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)

        struct amdkfd_process_info *process_info = vm->process_info;
        struct amdgpu_bo *pd = vm->root.base.bo;

        /* Release eviction fence from PD */
        amdgpu_bo_reserve(pd, false);
        amdgpu_bo_fence(pd, NULL, false);
        amdgpu_bo_unreserve(pd);

        /* Update process info */
        mutex_lock(&process_info->lock);
        process_info->n_vms--;
        list_del(&vm->vm_list_node);
        mutex_unlock(&process_info->lock);

        vm->process_info = NULL;

        /* Release per-process resources when last compute VM is destroyed */
        if (!process_info->n_vms) {
                WARN_ON(!list_empty(&process_info->kfd_bo_list));
                WARN_ON(!list_empty(&process_info->userptr_valid_list));
                WARN_ON(!list_empty(&process_info->userptr_inval_list));

                dma_fence_put(&process_info->eviction_fence->base);
                cancel_delayed_work_sync(&process_info->restore_userptr_work);
                put_pid(process_info->pid);
                mutex_destroy(&process_info->lock);
                kfree(process_info);
void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)

        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

        if (WARN_ON(!kgd || !vm))

        pr_debug("Destroying process vm %p\n", vm);

        /* Release the VM context */
        amdgpu_vm_fini(adev, avm);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)

        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

        if (WARN_ON(!kgd || !vm))

        pr_debug("Releasing process vm %p\n", vm);

        /* The original pasid of the amdgpu vm has already been
         * released when the amdgpu vm was converted to a compute vm.
         * The current pasid is managed by kfd and will be
         * released on kfd process destroy. Set amdgpu pasid
         * to 0 to avoid duplicate release.
         */
        amdgpu_vm_release_compute(adev, avm);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)

        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
        struct amdgpu_bo *pd = avm->root.base.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

        if (adev->asic_type < CHIP_VEGA10)
                return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
        return avm->pd_phys_addr;
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                struct kgd_dev *kgd, uint64_t va, uint64_t size,
                void *vm, struct kgd_mem **mem,
                uint64_t *offset, uint32_t flags)

        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
        enum ttm_bo_type bo_type = ttm_bo_type_device;
        struct sg_table *sg = NULL;
        uint64_t user_addr = 0;
        struct amdgpu_bo *bo;
        struct amdgpu_bo_param bp;
        u32 domain, alloc_domain;

        /*
         * Check on which domain to allocate BO
         */
        if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
                domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
                alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
                alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
                        AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
                        AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
                domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;

        } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
                domain = AMDGPU_GEM_DOMAIN_GTT;
                alloc_domain = AMDGPU_GEM_DOMAIN_CPU;

                if (!offset || !*offset)

                user_addr = untagged_addr(*offset);
        } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
                        KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
                domain = AMDGPU_GEM_DOMAIN_GTT;
                alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
                bo_type = ttm_bo_type_sg;

                if (size > UINT_MAX)

                sg = create_doorbell_sg(*offset, size);

        *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);

        INIT_LIST_HEAD(&(*mem)->bo_va_list);
        mutex_init(&(*mem)->lock);
        (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
        /* Workaround for AQL queue wraparound bug. Map the same
         * memory twice. That means we only actually allocate half
         * of the requested size.
         */
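        /* Illustration: an 8 MiB AQL queue request ends up backed by a 4 MiB
         * BO; amdgpu_amdkfd_gpuvm_map_memory_to_gpu() then calls
         * add_bo_to_vm() a second time with is_aql = true so the same pages
         * appear at two GPU virtual addresses.
         */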
        if ((*mem)->aql_queue)

        (*mem)->alloc_flags = flags;

        amdgpu_sync_create(&(*mem)->sync);

        ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);

                pr_debug("Insufficient system memory\n");
                goto err_reserve_limit;

        pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
                        va, size, domain_string(alloc_domain));

        memset(&bp, 0, sizeof(bp));

        bp.domain = alloc_domain;
        bp.flags = alloc_flags;

        ret = amdgpu_bo_create(adev, &bp, &bo);

                pr_debug("Failed to create BO on domain %s. ret %d\n",
                                domain_string(alloc_domain), ret);

        if (bo_type == ttm_bo_type_sg) {

                bo->tbo.ttm->sg = sg;

                bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;

        (*mem)->domain = domain;
        (*mem)->mapped_to_gpu_memory = 0;
        (*mem)->process_info = avm->process_info;
        add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

                ret = init_user_pages(*mem, user_addr);

                        goto allocate_init_user_pages_failed;

                *offset = amdgpu_bo_mmap_offset(bo);

allocate_init_user_pages_failed:
        remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
        amdgpu_bo_unref(&bo);
        /* Don't unreserve system mem limit twice */
        goto err_reserve_limit;

        unreserve_mem_limit(adev, size, alloc_domain, !!sg);

        mutex_destroy(&(*mem)->lock);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)

        struct amdkfd_process_info *process_info = mem->process_info;
        unsigned long bo_size = mem->bo->tbo.mem.size;
        struct kfd_bo_va_list *entry, *tmp;
        struct bo_vm_reservation_context ctx;
        struct ttm_validate_buffer *bo_list_entry;
        unsigned int mapped_to_gpu_memory;

        bool is_imported = false;

        mutex_lock(&mem->lock);
        mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
        is_imported = mem->is_imported;
        mutex_unlock(&mem->lock);
        /* lock is not needed after this, since mem is unused and will
         * be freed anyway
         */

        if (mapped_to_gpu_memory > 0) {
                pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",

        /* Make sure restore workers don't access the BO any more */
        bo_list_entry = &mem->validate_list;
        mutex_lock(&process_info->lock);
        list_del(&bo_list_entry->head);
        mutex_unlock(&process_info->lock);

        /* No more MMU notifiers */
        amdgpu_mn_unregister(mem->bo);

        ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);

        /* The eviction fence should be removed by the last unmap.
         * TODO: Log an error condition if the bo still has the eviction fence
         * attached
         */
        amdgpu_amdkfd_remove_eviction_fence(mem->bo,
                                        process_info->eviction_fence);
        pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
                mem->va + bo_size * (1 + mem->aql_queue));

        /* Remove from VM internal data structures */
        list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
                remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,

        ret = unreserve_bo_and_vms(&ctx, false, false);

        /* Free the sync object */
        amdgpu_sync_free(&mem->sync);

        /* If the SG is not NULL, it's one we created for a doorbell or mmio
         * remap BO. We need to free it.
         */
        if (mem->bo->tbo.sg) {
                sg_free_table(mem->bo->tbo.sg);
                kfree(mem->bo->tbo.sg);

        /* Update the size of the BO being freed if it was allocated from
         * VRAM and is not imported.
         */

        if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&

        drm_gem_object_put_unlocked(&mem->bo->tbo.base);
        mutex_destroy(&mem->lock);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)

        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

        struct amdgpu_bo *bo;

        struct kfd_bo_va_list *entry;
        struct bo_vm_reservation_context ctx;
        struct kfd_bo_va_list *bo_va_entry = NULL;
        struct kfd_bo_va_list *bo_va_entry_aql = NULL;
        unsigned long bo_size;
        bool is_invalid_userptr = false;

                pr_err("Invalid BO when mapping memory to GPU\n");

        /* Make sure restore is not running concurrently. Since we
         * don't map invalid userptr BOs, we rely on the next restore
         * worker to do the mapping
         */
        mutex_lock(&mem->process_info->lock);

        /* Lock mmap-sem. If we find an invalid userptr BO, we can be
         * sure that the MMU notifier is no longer running
         * concurrently and the queues are actually stopped
         */
        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                mmap_write_lock(current->mm);
                is_invalid_userptr = atomic_read(&mem->invalid);
                mmap_write_unlock(current->mm);

        mutex_lock(&mem->lock);

        domain = mem->domain;
        bo_size = bo->tbo.mem.size;

        pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
                        mem->va + bo_size * (1 + mem->aql_queue),
                        vm, domain_string(domain));

        ret = reserve_bo_and_vm(mem, vm, &ctx);

        /* Userptr can be marked as "not invalid", but not actually be
         * validated yet (still in the system domain). In that case
         * the queues are still stopped and we can leave mapping for
         * the next restore worker
         */
        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
            bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
                is_invalid_userptr = true;

        if (check_if_add_bo_to_vm(avm, mem)) {
                ret = add_bo_to_vm(adev, mem, avm, false,

                        goto add_bo_to_vm_failed;
                if (mem->aql_queue) {
                        ret = add_bo_to_vm(adev, mem, avm,
                                        true, &bo_va_entry_aql);

                                goto add_bo_to_vm_failed_aql;

                ret = vm_validate_pt_pd_bos(avm);

                        goto add_bo_to_vm_failed;

        if (mem->mapped_to_gpu_memory == 0 &&
            !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                /* Validate BO only once. The eviction fence gets added to BO
                 * the first time it is mapped. Validate will wait for all
                 * background evictions to complete.
                 */
                ret = amdgpu_amdkfd_bo_validate(bo, domain, true);

                        pr_debug("Validate failed\n");
                        goto map_bo_to_gpuvm_failed;

        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
                        pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
                                        entry->va, entry->va + bo_size,

                        ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
                                        is_invalid_userptr);

                                pr_err("Failed to map bo to gpuvm\n");
                                goto map_bo_to_gpuvm_failed;

                        ret = vm_update_pds(vm, ctx.sync);

                                pr_err("Failed to update page directories\n");
                                goto map_bo_to_gpuvm_failed;

                        entry->is_mapped = true;
                        mem->mapped_to_gpu_memory++;
                        pr_debug("\t INC mapping count %d\n",
                                        mem->mapped_to_gpu_memory);

        if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)

                        &avm->process_info->eviction_fence->base,

        ret = unreserve_bo_and_vms(&ctx, false, false);

map_bo_to_gpuvm_failed:
        if (bo_va_entry_aql)
                remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:

                remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
        unreserve_bo_and_vms(&ctx, false, false);

        mutex_unlock(&mem->process_info->lock);
        mutex_unlock(&mem->lock);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)

        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdkfd_process_info *process_info =
                ((struct amdgpu_vm *)vm)->process_info;
        unsigned long bo_size = mem->bo->tbo.mem.size;
        struct kfd_bo_va_list *entry;
        struct bo_vm_reservation_context ctx;

        mutex_lock(&mem->lock);

        ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);

        /* If no VMs were reserved, it means the BO wasn't actually mapped */
        if (ctx.n_vms == 0) {

        ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);

        pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
                mem->va + bo_size * (1 + mem->aql_queue),

        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if (entry->bo_va->base.vm == vm && entry->is_mapped) {
                        pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
                                        entry->va + bo_size,

                        ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);

                                entry->is_mapped = false;

                                pr_err("failed to unmap VA 0x%llx\n",

                        mem->mapped_to_gpu_memory--;
                        pr_debug("\t DEC mapping count %d\n",
                                        mem->mapped_to_gpu_memory);

        /* If BO is unmapped from all VMs, unfence it. It can be evicted if
         * needed.
         */
        if (mem->mapped_to_gpu_memory == 0 &&
            !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
                amdgpu_amdkfd_remove_eviction_fence(mem->bo,
                                                process_info->eviction_fence);

        unreserve_bo_and_vms(&ctx, false, false);

        mutex_unlock(&mem->lock);
int amdgpu_amdkfd_gpuvm_sync_memory(
                struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)

        struct amdgpu_sync sync;

        amdgpu_sync_create(&sync);

        mutex_lock(&mem->lock);
        amdgpu_sync_clone(&mem->sync, &sync);
        mutex_unlock(&mem->lock);

        ret = amdgpu_sync_wait(&sync, intr);
        amdgpu_sync_free(&sync);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
                struct kgd_mem *mem, void **kptr, uint64_t *size)

        struct amdgpu_bo *bo = mem->bo;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                pr_err("userptr can't be mapped to kernel\n");

        /* delete kgd_mem from kfd_bo_list to avoid re-validating
         * this BO in BO's restoring after eviction.
         */
        mutex_lock(&mem->process_info->lock);

        ret = amdgpu_bo_reserve(bo, true);

                pr_err("Failed to reserve bo. ret %d\n", ret);
                goto bo_reserve_failed;

        ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);

                pr_err("Failed to pin bo. ret %d\n", ret);

        ret = amdgpu_bo_kmap(bo, kptr);

                pr_err("Failed to map bo to kernel. ret %d\n", ret);

        amdgpu_amdkfd_remove_eviction_fence(
                bo, mem->process_info->eviction_fence);
        list_del_init(&mem->validate_list.head);

                *size = amdgpu_bo_size(bo);

        amdgpu_bo_unreserve(bo);

        mutex_unlock(&mem->process_info->lock);

        amdgpu_bo_unpin(bo);

        amdgpu_bo_unreserve(bo);

        mutex_unlock(&mem->process_info->lock);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
                                          struct kfd_vm_fault_info *mem)

        struct amdgpu_device *adev;

        adev = (struct amdgpu_device *)kgd;
        if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
                *mem = *adev->gmc.vm_fault_info;

                atomic_set(&adev->gmc.vm_fault_info_updated, 0);
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
                                      struct dma_buf *dma_buf,
                                      uint64_t va, void *vm,
                                      struct kgd_mem **mem, uint64_t *size,
                                      uint64_t *mmap_offset)

        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct drm_gem_object *obj;
        struct amdgpu_bo *bo;
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

        if (dma_buf->ops != &amdgpu_dmabuf_ops)
                /* Can't handle non-graphics buffers */

        obj = dma_buf->priv;
        if (obj->dev->dev_private != adev)
                /* Can't handle buffers from other devices */

        bo = gem_to_amdgpu_bo(obj);
        if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
                                    AMDGPU_GEM_DOMAIN_GTT)))
                /* Only VRAM and GTT BOs are supported */

        *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);

                *size = amdgpu_bo_size(bo);

                *mmap_offset = amdgpu_bo_mmap_offset(bo);

        INIT_LIST_HEAD(&(*mem)->bo_va_list);
        mutex_init(&(*mem)->lock);

        (*mem)->alloc_flags =
                ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
                KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
                | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
                | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;

        drm_gem_object_get(&bo->tbo.base);

        (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
                AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
        (*mem)->mapped_to_gpu_memory = 0;
        (*mem)->process_info = avm->process_info;
        add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
        amdgpu_sync_create(&(*mem)->sync);
        (*mem)->is_imported = true;
/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
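/* Counter protocol (sketch): mem->invalid marks this BO as needing
 * revalidation and process_info->evicted_bos counts outstanding evictions.
 * Only the first increment quiesces the user mode queues; the delayed
 * restore worker later revalidates the BOs, resets the counter and resumes
 * the queues.
 */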
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
                                struct mm_struct *mm)

        struct amdkfd_process_info *process_info = mem->process_info;

        atomic_inc(&mem->invalid);
        evicted_bos = atomic_inc_return(&process_info->evicted_bos);
        if (evicted_bos == 1) {
                /* First eviction, stop the queues */
                r = kgd2kfd_quiesce_mm(mm);

                        pr_err("Failed to quiesce KFD\n");
                schedule_delayed_work(&process_info->restore_userptr_work,
                        msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
                                     struct mm_struct *mm)

        struct kgd_mem *mem, *tmp_mem;
        struct amdgpu_bo *bo;
        struct ttm_operation_ctx ctx = { false, false };

        /* Move all invalidated BOs to the userptr_inval_list and
         * release their user pages by migration to the CPU domain
         */
        list_for_each_entry_safe(mem, tmp_mem,
                                 &process_info->userptr_valid_list,
                                 validate_list.head) {
                if (!atomic_read(&mem->invalid))
                        continue; /* BO is still valid */

                if (amdgpu_bo_reserve(bo, true))

                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                amdgpu_bo_unreserve(bo);

                        pr_err("%s: Failed to invalidate userptr BO\n",

                list_move_tail(&mem->validate_list.head,
                               &process_info->userptr_inval_list);

        if (list_empty(&process_info->userptr_inval_list))
                return 0; /* All evicted userptr BOs were freed */

        /* Go through userptr_inval_list and update any invalid user_pages */
        list_for_each_entry(mem, &process_info->userptr_inval_list,
                            validate_list.head) {
                invalid = atomic_read(&mem->invalid);

                        /* BO hasn't been invalidated since the last
                         * revalidation attempt. Keep its BO list.
                         */

                /* Get updated user pages */
                ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);

                        pr_debug("%s: Failed to get user pages: %d\n",

                        /* Return error -EBUSY or -ENOMEM, retry restore */

                /*
                 * FIXME: Cannot ignore the return code, must hold
                 */
                amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

                /* Mark the BO as valid unless it was invalidated
                 * again concurrently.
                 */
                if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)

        struct amdgpu_bo_list_entry *pd_bo_list_entries;
        struct list_head resv_list, duplicates;
        struct ww_acquire_ctx ticket;
        struct amdgpu_sync sync;

        struct amdgpu_vm *peer_vm;
        struct kgd_mem *mem, *tmp_mem;
        struct amdgpu_bo *bo;
        struct ttm_operation_ctx ctx = { false, false };

        pd_bo_list_entries = kcalloc(process_info->n_vms,
                                     sizeof(struct amdgpu_bo_list_entry),

        if (!pd_bo_list_entries) {
                pr_err("%s: Failed to allocate PD BO list entries\n", __func__);

        INIT_LIST_HEAD(&resv_list);
        INIT_LIST_HEAD(&duplicates);

        /* Get all the page directory BOs that need to be reserved */

        list_for_each_entry(peer_vm, &process_info->vm_list_head,

                amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
                                    &pd_bo_list_entries[i++]);
        /* Add the userptr_inval_list entries to resv_list */
        list_for_each_entry(mem, &process_info->userptr_inval_list,
                            validate_list.head) {
                list_add_tail(&mem->resv_list.head, &resv_list);
                mem->resv_list.bo = mem->validate_list.bo;
                mem->resv_list.num_shared = mem->validate_list.num_shared;

        /* Reserve all BOs and page tables for validation */
        ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
        WARN(!list_empty(&duplicates), "Duplicates should be empty");

        amdgpu_sync_create(&sync);

        ret = process_validate_vms(process_info);

        /* Validate BOs and update GPUVM page tables */
        list_for_each_entry_safe(mem, tmp_mem,
                                 &process_info->userptr_inval_list,
                                 validate_list.head) {
                struct kfd_bo_va_list *bo_va_entry;

                /* Validate the BO if we got user pages */
                if (bo->tbo.ttm->pages[0]) {
                        amdgpu_bo_placement_from_domain(bo, mem->domain);
                        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

                                pr_err("%s: failed to validate BO\n", __func__);

                list_move_tail(&mem->validate_list.head,
                               &process_info->userptr_valid_list);

                /* Update mapping. If the BO was not validated
                 * (because we couldn't get user pages), this will
                 * clear the page table entries, which will result in
                 * VM faults if the GPU tries to access the invalid
                 * memory.
                 */
                list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
                        if (!bo_va_entry->is_mapped)

                        ret = update_gpuvm_pte((struct amdgpu_device *)
                                               bo_va_entry->kgd_dev,
                                               bo_va_entry, &sync);

                                pr_err("%s: update PTE failed\n", __func__);
                                /* make sure this gets validated again */
                                atomic_inc(&mem->invalid);

        /* Update page directories */
        ret = process_update_pds(process_info, &sync);

        ttm_eu_backoff_reservation(&ticket, &resv_list);
        amdgpu_sync_wait(&sync, false);
        amdgpu_sync_free(&sync);

        kfree(pd_bo_list_entries);
/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)

        struct delayed_work *dwork = to_delayed_work(work);
        struct amdkfd_process_info *process_info =
                container_of(dwork, struct amdkfd_process_info,
                             restore_userptr_work);
        struct task_struct *usertask;
        struct mm_struct *mm;

        evicted_bos = atomic_read(&process_info->evicted_bos);

        /* Reference task and mm in case of concurrent process termination */
        usertask = get_pid_task(process_info->pid, PIDTYPE_PID);

        mm = get_task_mm(usertask);

                put_task_struct(usertask);

        mutex_lock(&process_info->lock);

        if (update_invalid_user_pages(process_info, mm))

        /* userptr_inval_list can be empty if all evicted userptr BOs
         * have been freed. In that case there is nothing to validate
         * and we can just restart the queues.
         */
        if (!list_empty(&process_info->userptr_inval_list)) {
                if (atomic_read(&process_info->evicted_bos) != evicted_bos)
                        goto unlock_out; /* Concurrent eviction, try again */

                if (validate_invalid_user_pages(process_info))

        /* Final check for concurrent eviction and atomic update. If
         * another eviction happens after successful update, it will
         * be a first eviction that calls quiesce_mm. The eviction
         * reference counting inside KFD will handle this case.
         */
        if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=

        if (kgd2kfd_resume_mm(mm)) {
                pr_err("%s: Failed to resume KFD\n", __func__);
                /* No recovery from this failure. Probably the CP is
                 * hanging. No point trying again.
                 */

        mutex_unlock(&process_info->lock);

        put_task_struct(usertask);

        /* If validation failed, reschedule another attempt */

        schedule_delayed_work(&process_info->restore_userptr_work,
                              msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 * KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, restore thread calls this function. The function
 * should be called when the Process is still valid. BO restore involves -
 *
 * 1. Release old eviction fence and create new one
 * 2. Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *    BOs that need to be reserved.
 * 4. Reserve all the BOs
 * 5. Validate PD and PT BOs.
 * 6. Validate all KFD BOs using kfd_bo_list and Map them and add new fence
 * 7. Add fence to all PD and PT BOs.
 * 8. Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)

        struct amdgpu_bo_list_entry *pd_bo_list;
        struct amdkfd_process_info *process_info = info;
        struct amdgpu_vm *peer_vm;
        struct kgd_mem *mem;
        struct bo_vm_reservation_context ctx;
        struct amdgpu_amdkfd_fence *new_fence;

        struct list_head duplicate_save;
        struct amdgpu_sync sync_obj;

        INIT_LIST_HEAD(&duplicate_save);
        INIT_LIST_HEAD(&ctx.list);
        INIT_LIST_HEAD(&ctx.duplicates);

        pd_bo_list = kcalloc(process_info->n_vms,
                             sizeof(struct amdgpu_bo_list_entry),

        mutex_lock(&process_info->lock);
        list_for_each_entry(peer_vm, &process_info->vm_list_head,

                amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

        /* Reserve all BOs and page tables/directory. Add all BOs from
         * kfd_bo_list to ctx.list
         */
        list_for_each_entry(mem, &process_info->kfd_bo_list,
                            validate_list.head) {

                list_add_tail(&mem->resv_list.head, &ctx.list);
                mem->resv_list.bo = mem->validate_list.bo;
                mem->resv_list.num_shared = mem->validate_list.num_shared;

        ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
                                     false, &duplicate_save);

                pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
                goto ttm_reserve_fail;

        amdgpu_sync_create(&sync_obj);

        /* Validate PDs and PTs */
        ret = process_validate_vms(process_info);

                goto validate_map_fail;

        ret = process_sync_pds_resv(process_info, &sync_obj);

                pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
                goto validate_map_fail;

        /* Validate BOs and map them to GPUVM (update VM page tables). */
        list_for_each_entry(mem, &process_info->kfd_bo_list,
                            validate_list.head) {

                struct amdgpu_bo *bo = mem->bo;
                uint32_t domain = mem->domain;
                struct kfd_bo_va_list *bo_va_entry;

                ret = amdgpu_amdkfd_bo_validate(bo, domain, false);

                        pr_debug("Memory eviction: Validate BOs failed. Try again\n");
                        goto validate_map_fail;

                ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving, false);

                        pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
                        goto validate_map_fail;

                list_for_each_entry(bo_va_entry, &mem->bo_va_list,

                        ret = update_gpuvm_pte((struct amdgpu_device *)
                                               bo_va_entry->kgd_dev,

                                pr_debug("Memory eviction: update PTE failed. Try again\n");
                                goto validate_map_fail;

        /* Update page directories */
        ret = process_update_pds(process_info, &sync_obj);

                pr_debug("Memory eviction: update PDs failed. Try again\n");
                goto validate_map_fail;

        /* Wait for validate and PT updates to finish */
        amdgpu_sync_wait(&sync_obj, false);

        /* Release old eviction fence and create new one, because fence only
         * goes from unsignaled to signaled, fence cannot be reused.
         * Use context and mm from the old fence.
         */
        new_fence = amdgpu_amdkfd_fence_create(
                                process_info->eviction_fence->base.context,
                                process_info->eviction_fence->mm);

                pr_err("Failed to create eviction fence\n");

                goto validate_map_fail;

        dma_fence_put(&process_info->eviction_fence->base);
        process_info->eviction_fence = new_fence;
        *ef = dma_fence_get(&new_fence->base);

        /* Attach new eviction fence to all BOs */
        list_for_each_entry(mem, &process_info->kfd_bo_list,

                amdgpu_bo_fence(mem->bo,
                        &process_info->eviction_fence->base, true);

        /* Attach eviction fence to PD / PT BOs */
        list_for_each_entry(peer_vm, &process_info->vm_list_head,

                struct amdgpu_bo *bo = peer_vm->root.base.bo;

                amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);

        ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
        amdgpu_sync_free(&sync_obj);

        mutex_unlock(&process_info->lock);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)

        struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
        struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;

        *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);

        mutex_init(&(*mem)->lock);
        INIT_LIST_HEAD(&(*mem)->bo_va_list);
        (*mem)->bo = amdgpu_bo_ref(gws_bo);
        (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
        (*mem)->process_info = process_info;
        add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
        amdgpu_sync_create(&(*mem)->sync);

        /* Validate gws bo the first time it is added to process */
        mutex_lock(&(*mem)->process_info->lock);
        ret = amdgpu_bo_reserve(gws_bo, false);
        if (unlikely(ret)) {
                pr_err("Reserve gws bo failed %d\n", ret);
                goto bo_reservation_failure;

        ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);

                pr_err("GWS BO validate failed %d\n", ret);
                goto bo_validation_failure;

        /* GWS resource is shared b/t amdgpu and amdkfd
         * Add process eviction fence to bo so they can
         */
        ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);

                goto reserve_shared_fail;
        amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
        amdgpu_bo_unreserve(gws_bo);
        mutex_unlock(&(*mem)->process_info->lock);

reserve_shared_fail:
bo_validation_failure:
        amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
        mutex_unlock(&(*mem)->process_info->lock);
        amdgpu_sync_free(&(*mem)->sync);
        remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
        amdgpu_bo_unref(&gws_bo);
        mutex_destroy(&(*mem)->lock);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)

        struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
        struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
        struct amdgpu_bo *gws_bo = kgd_mem->bo;

        /* Remove BO from process's validate list so restore worker won't touch
         * it anymore
         */
        remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

        ret = amdgpu_bo_reserve(gws_bo, false);
        if (unlikely(ret)) {
                pr_err("Reserve gws bo failed %d\n", ret);
                //TODO add BO back to validate_list?

        amdgpu_amdkfd_remove_eviction_fence(gws_bo,
                                process_info->eviction_fence);
        amdgpu_bo_unreserve(gws_bo);
        amdgpu_sync_free(&kgd_mem->sync);
        amdgpu_bo_unref(&gws_bo);
        mutex_destroy(&kgd_mem->lock);
/* Returns GPU-specific tiling mode information */
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
                                  struct tile_config *config)

        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        config->gb_addr_config = adev->gfx.config.gb_addr_config;

        config->tile_config_ptr = adev->gfx.config.tile_mode_array;
        config->num_tile_configs =
                        ARRAY_SIZE(adev->gfx.config.tile_mode_array);
        config->macro_tile_config_ptr =
                        adev->gfx.config.macrotile_mode_array;
        config->num_macro_tile_configs =
                        ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

        /* Those values are not set from GFX9 onwards */
        config->num_banks = adev->gfx.config.num_banks;
        config->num_ranks = adev->gfx.config.num_ranks;