drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1 /*
2 * Copyright 2014-2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27
28 #include "amdgpu_object.h"
29 #include "amdgpu_vm.h"
30 #include "amdgpu_amdkfd.h"
31 #include "amdgpu_dma_buf.h"
32 #include <uapi/linux/kfd_ioctl.h>
33
34 /* BO flag to indicate a KFD userptr BO */
35 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
36
37 /* Userptr restore delay, just long enough to allow consecutive VM
38 * changes to accumulate
39 */
40 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
41
42 /* Impose limit on how much memory KFD can use */
43 static struct {
44 uint64_t max_system_mem_limit;
45 uint64_t max_ttm_mem_limit;
46 int64_t system_mem_used;
47 int64_t ttm_mem_used;
48 spinlock_t mem_limit_lock;
49 } kfd_mem_limit;
50
51 /* Struct used for amdgpu_amdkfd_bo_validate */
52 struct amdgpu_vm_parser {
53 uint32_t domain;
54 bool wait;
55 };
56
57 static const char * const domain_bit_to_string[] = {
58 "CPU",
59 "GTT",
60 "VRAM",
61 "GDS",
62 "GWS",
63 "OA"
64 };
65
66 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
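/* e.g. domain_string(AMDGPU_GEM_DOMAIN_VRAM) yields "VRAM": VRAM is
 * bit 2 (0x4), so ffs() returns 3 and entry 2 of the table is used.
 */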
67
68 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
69
70
71 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
72 {
73 return (struct amdgpu_device *)kgd;
74 }
75
76 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
77 struct kgd_mem *mem)
78 {
79 struct kfd_bo_va_list *entry;
80
81 list_for_each_entry(entry, &mem->bo_va_list, bo_list)
82 if (entry->bo_va->base.vm == avm)
83 return false;
84
85 return true;
86 }
87
88 /* Set memory usage limits. Currently, limits are
89 * System (TTM + userptr) memory - 15/16th System RAM
90 * TTM memory - 3/8th System RAM
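 *
 * e.g. with 16 GiB of system RAM this yields a 15 GiB system memory
 * limit (16 - 16/16) and a 6 GiB TTM limit (8 - 2), matching the
 * 15/16 and 3/8 fractions above.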
91 */
92 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
93 {
94 struct sysinfo si;
95 uint64_t mem;
96
97 si_meminfo(&si);
98 mem = si.totalram - si.totalhigh;
99 mem *= si.mem_unit;
100
101 spin_lock_init(&kfd_mem_limit.mem_limit_lock);
102 kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
103 kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
104 pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
105 (kfd_mem_limit.max_system_mem_limit >> 20),
106 (kfd_mem_limit.max_ttm_mem_limit >> 20));
107 }
108
109 /* Estimate page table size needed to represent a given memory size
110 *
111 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
112 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
113 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
114 * for 2MB pages for TLB efficiency. However, small allocations and
115 * fragmented system memory still need some 4KB pages. We choose a
116 * compromise that should work in most cases without reserving too
117 * much memory for page tables unnecessarily (factor 16K, >> 14).
118 */
119 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
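/* e.g. for 64 GiB of managed memory this reserves 64 GiB >> 14 = 4 MiB
 * for page tables, between the pure 4KB-page estimate (128 MiB, >> 9)
 * and the pure 2MB-page estimate (256 KiB, >> 18).
 */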
120
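/* Check a proposed allocation against the limits above and, on
 * success, charge it to the system/TTM/VRAM counters. Must be
 * balanced by a later unreserve_mem_limit() call with the same
 * size, domain and sg arguments.
 */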
121 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
122 uint64_t size, u32 domain, bool sg)
123 {
124 uint64_t reserved_for_pt =
125 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
126 size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
127 int ret = 0;
128
129 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
130 sizeof(struct amdgpu_bo));
131
132 vram_needed = 0;
133 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
134 /* TTM GTT memory */
135 system_mem_needed = acc_size + size;
136 ttm_mem_needed = acc_size + size;
137 } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
138 /* Userptr */
139 system_mem_needed = acc_size + size;
140 ttm_mem_needed = acc_size;
141 } else {
142 /* VRAM and SG */
143 system_mem_needed = acc_size;
144 ttm_mem_needed = acc_size;
145 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
146 vram_needed = size;
147 }
148
149 spin_lock(&kfd_mem_limit.mem_limit_lock);
150
151 if ((kfd_mem_limit.system_mem_used + system_mem_needed >
152 kfd_mem_limit.max_system_mem_limit) ||
153 (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
154 kfd_mem_limit.max_ttm_mem_limit) ||
155 (adev->kfd.vram_used + vram_needed >
156 adev->gmc.real_vram_size - reserved_for_pt)) {
157 ret = -ENOMEM;
158 } else {
159 kfd_mem_limit.system_mem_used += system_mem_needed;
160 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
161 adev->kfd.vram_used += vram_needed;
162 }
163
164 spin_unlock(&kfd_mem_limit.mem_limit_lock);
165 return ret;
166 }
167
168 static void unreserve_mem_limit(struct amdgpu_device *adev,
169 uint64_t size, u32 domain, bool sg)
170 {
171 size_t acc_size;
172
173 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
174 sizeof(struct amdgpu_bo));
175
176 spin_lock(&kfd_mem_limit.mem_limit_lock);
177 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
178 kfd_mem_limit.system_mem_used -= (acc_size + size);
179 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
180 } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
181 kfd_mem_limit.system_mem_used -= (acc_size + size);
182 kfd_mem_limit.ttm_mem_used -= acc_size;
183 } else {
184 kfd_mem_limit.system_mem_used -= acc_size;
185 kfd_mem_limit.ttm_mem_used -= acc_size;
186 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
187 adev->kfd.vram_used -= size;
188 WARN_ONCE(adev->kfd.vram_used < 0,
189 "kfd VRAM memory accounting unbalanced");
190 }
191 }
192 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
193 "kfd system memory accounting unbalanced");
194 WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
195 "kfd TTM memory accounting unbalanced");
196
197 spin_unlock(&kfd_mem_limit.mem_limit_lock);
198 }
199
200 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
201 {
202 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
203 u32 domain = bo->preferred_domains;
204 bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
205
206 if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
207 domain = AMDGPU_GEM_DOMAIN_CPU;
208 sg = false;
209 }
210
211 unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
212 }
213
214
215 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
216 * reservation object.
217 *
218 * @bo: [IN] Remove eviction fence(s) from this BO
219 * @ef: [IN] This eviction fence is removed if it
220 * is present in the shared list.
221 *
222 * NOTE: Must be called with the BO reserved, i.e. bo->tbo.base.resv held.
223 */
224 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
225 struct amdgpu_amdkfd_fence *ef)
226 {
227 struct dma_resv *resv = bo->tbo.base.resv;
228 struct dma_resv_list *old, *new;
229 unsigned int i, j, k;
230
231 if (!ef)
232 return -EINVAL;
233
234 old = dma_resv_get_list(resv);
235 if (!old)
236 return 0;
237
238 new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
239 GFP_KERNEL);
240 if (!new)
241 return -ENOMEM;
242
243 /* Go through all the shared fences in the reservation object and sort
244 * the interesting ones to the end of the list.
245 */
246 for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
247 struct dma_fence *f;
248
249 f = rcu_dereference_protected(old->shared[i],
250 dma_resv_held(resv));
251
252 if (f->context == ef->base.context)
253 RCU_INIT_POINTER(new->shared[--j], f);
254 else
255 RCU_INIT_POINTER(new->shared[k++], f);
256 }
257 new->shared_max = old->shared_max;
258 new->shared_count = k;
259
260 /* Install the new fence list, seqcount provides the barriers */
261 preempt_disable();
262 write_seqcount_begin(&resv->seq);
263 RCU_INIT_POINTER(resv->fence, new);
264 write_seqcount_end(&resv->seq);
265 preempt_enable();
266
267 /* Drop the references to the removed fences */
268 for (i = j, k = 0; i < old->shared_count; ++i) {
269 struct dma_fence *f;
270
271 f = rcu_dereference_protected(new->shared[i],
272 dma_resv_held(resv));
273 dma_fence_put(f);
274 }
275 kfree_rcu(old, rcu);
276
277 return 0;
278 }
279
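/* Strip the KFD process eviction fence from a page-table or
 * page-directory BO's reservation object, e.g. when the BO is being
 * released. The fence is looked up via the root PD's process info.
 */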
280 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
281 {
282 struct amdgpu_bo *root = bo;
283 struct amdgpu_vm_bo_base *vm_bo;
284 struct amdgpu_vm *vm;
285 struct amdkfd_process_info *info;
286 struct amdgpu_amdkfd_fence *ef;
287 int ret;
288
289 /* We can always get vm_bo from the root PD BO. */
290 while (root->parent)
291 root = root->parent;
292
293 vm_bo = root->vm_bo;
294 if (!vm_bo)
295 return 0;
296
297 vm = vm_bo->vm;
298 if (!vm)
299 return 0;
300
301 info = vm->process_info;
302 if (!info || !info->eviction_fence)
303 return 0;
304
305 ef = container_of(dma_fence_get(&info->eviction_fence->base),
306 struct amdgpu_amdkfd_fence, base);
307
308 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
309 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
310 dma_resv_unlock(bo->tbo.base.resv);
311
312 dma_fence_put(&ef->base);
313 return ret;
314 }
315
316 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
317 bool wait)
318 {
319 struct ttm_operation_ctx ctx = { false, false };
320 int ret;
321
322 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
323 "Called with userptr BO"))
324 return -EINVAL;
325
326 amdgpu_bo_placement_from_domain(bo, domain);
327
328 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
329 if (ret)
330 goto validate_fail;
331 if (wait)
332 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
333
334 validate_fail:
335 return ret;
336 }
337
338 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
339 {
340 struct amdgpu_vm_parser *p = param;
341
342 return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
343 }
344
345 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
346 *
347 * Page directories are not updated here because huge page handling
348 * during page table updates can invalidate page directory entries
349 * again. Page directories are only updated after updating page
350 * tables.
351 */
352 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
353 {
354 struct amdgpu_bo *pd = vm->root.base.bo;
355 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
356 struct amdgpu_vm_parser param;
357 int ret;
358
359 param.domain = AMDGPU_GEM_DOMAIN_VRAM;
360 param.wait = false;
361
362 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
363 &param);
364 if (ret) {
365 pr_err("failed to validate PT BOs\n");
366 return ret;
367 }
368
369 ret = amdgpu_amdkfd_validate(&param, pd);
370 if (ret) {
371 pr_err("failed to validate PD\n");
372 return ret;
373 }
374
375 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
376
377 if (vm->use_cpu_for_update) {
378 ret = amdgpu_bo_kmap(pd, NULL);
379 if (ret) {
380 pr_err("failed to kmap PD, ret=%d\n", ret);
381 return ret;
382 }
383 }
384
385 return 0;
386 }
387
388 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
389 {
390 struct amdgpu_bo *pd = vm->root.base.bo;
391 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
392 int ret;
393
394 ret = amdgpu_vm_update_pdes(adev, vm, false);
395 if (ret)
396 return ret;
397
398 return amdgpu_sync_fence(sync, vm->last_update, false);
399 }
400
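/* Translate KFD allocation flags into ASIC-specific PTE flags. On
 * Arcturus, VRAM on the local device can be mapped cache-coherent
 * (CC) or read-write (RW); VRAM on a remote device must be uncached.
 */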
401 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
402 {
403 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
404 bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
405 uint32_t mapping_flags;
406
407 mapping_flags = AMDGPU_VM_PAGE_READABLE;
408 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
409 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
410 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
411 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
412
413 switch (adev->asic_type) {
414 case CHIP_ARCTURUS:
415 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
416 if (bo_adev == adev)
417 mapping_flags |= coherent ?
418 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
419 else
420 mapping_flags |= AMDGPU_VM_MTYPE_UC;
421 } else {
422 mapping_flags |= coherent ?
423 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
424 }
425 break;
426 default:
427 mapping_flags |= coherent ?
428 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
429 }
430
431 return amdgpu_gem_va_map_flags(adev, mapping_flags);
432 }
433
434 /* add_bo_to_vm - Add a BO to a VM
435 *
436 * Everything that needs to be done only once when a BO is first added
437 * to a VM. It can later be mapped and unmapped many times without
438 * repeating these steps.
439 *
440 * 1. Allocate and initialize BO VA entry data structure
441 * 2. Add BO to the VM
442 * 3. Determine ASIC-specific PTE flags
443 * 4. Alloc page tables and directories if needed
444 * 4a. Validate new page tables and directories
445 */
446 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
447 struct amdgpu_vm *vm, bool is_aql,
448 struct kfd_bo_va_list **p_bo_va_entry)
449 {
450 int ret;
451 struct kfd_bo_va_list *bo_va_entry;
452 struct amdgpu_bo *bo = mem->bo;
453 uint64_t va = mem->va;
454 struct list_head *list_bo_va = &mem->bo_va_list;
455 unsigned long bo_size = bo->tbo.mem.size;
456
457 if (!va) {
458 pr_err("Invalid VA when adding BO to VM\n");
459 return -EINVAL;
460 }
461
462 if (is_aql)
463 va += bo_size;
464
465 bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
466 if (!bo_va_entry)
467 return -ENOMEM;
468
469 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
470 va + bo_size, vm);
471
472 /* Add BO to VM internal data structures */
473 bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
474 if (!bo_va_entry->bo_va) {
475 ret = -EINVAL;
476 pr_err("Failed to add BO object to VM. ret == %d\n",
477 ret);
478 goto err_vmadd;
479 }
480
481 bo_va_entry->va = va;
482 bo_va_entry->pte_flags = get_pte_flags(adev, mem);
483 bo_va_entry->kgd_dev = (void *)adev;
484 list_add(&bo_va_entry->bo_list, list_bo_va);
485
486 if (p_bo_va_entry)
487 *p_bo_va_entry = bo_va_entry;
488
489 /* Allocate and validate page tables if needed */
490 ret = vm_validate_pt_pd_bos(vm);
491 if (ret) {
492 pr_err("validate_pt_pd_bos() failed\n");
493 goto err_alloc_pts;
494 }
495
496 return 0;
497
498 err_alloc_pts:
499 amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
500 list_del(&bo_va_entry->bo_list);
501 err_vmadd:
502 kfree(bo_va_entry);
503 return ret;
504 }
505
506 static void remove_bo_from_vm(struct amdgpu_device *adev,
507 struct kfd_bo_va_list *entry, unsigned long size)
508 {
509 pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
510 entry->va,
511 entry->va + size, entry);
512 amdgpu_vm_bo_rmv(adev, entry->bo_va);
513 list_del(&entry->bo_list);
514 kfree(entry);
515 }
516
517 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
518 struct amdkfd_process_info *process_info,
519 bool userptr)
520 {
521 struct ttm_validate_buffer *entry = &mem->validate_list;
522 struct amdgpu_bo *bo = mem->bo;
523
524 INIT_LIST_HEAD(&entry->head);
525 entry->num_shared = 1;
526 entry->bo = &bo->tbo;
527 mutex_lock(&process_info->lock);
528 if (userptr)
529 list_add_tail(&entry->head, &process_info->userptr_valid_list);
530 else
531 list_add_tail(&entry->head, &process_info->kfd_bo_list);
532 mutex_unlock(&process_info->lock);
533 }
534
535 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
536 struct amdkfd_process_info *process_info)
537 {
538 struct ttm_validate_buffer *bo_list_entry;
539
540 bo_list_entry = &mem->validate_list;
541 mutex_lock(&process_info->lock);
542 list_del(&bo_list_entry->head);
543 mutex_unlock(&process_info->lock);
544 }
545
546 /* Initializes user pages. It registers the MMU notifier and validates
547 * the userptr BO in the GTT domain.
548 *
549 * The BO must already be on the userptr_valid_list. Otherwise an
550 * eviction and restore may happen that leaves the new BO unmapped
551 * with the user mode queues running.
552 *
553 * Takes the process_info->lock to protect against concurrent restore
554 * workers.
555 *
556 * Returns 0 for success, negative errno for errors.
557 */
558 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
559 {
560 struct amdkfd_process_info *process_info = mem->process_info;
561 struct amdgpu_bo *bo = mem->bo;
562 struct ttm_operation_ctx ctx = { true, false };
563 int ret = 0;
564
565 mutex_lock(&process_info->lock);
566
567 ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
568 if (ret) {
569 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
570 goto out;
571 }
572
573 ret = amdgpu_mn_register(bo, user_addr);
574 if (ret) {
575 pr_err("%s: Failed to register MMU notifier: %d\n",
576 __func__, ret);
577 goto out;
578 }
579
580 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
581 if (ret) {
582 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
583 goto unregister_out;
584 }
585
586 ret = amdgpu_bo_reserve(bo, true);
587 if (ret) {
588 pr_err("%s: Failed to reserve BO\n", __func__);
589 goto release_out;
590 }
591 amdgpu_bo_placement_from_domain(bo, mem->domain);
592 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
593 if (ret)
594 pr_err("%s: failed to validate BO\n", __func__);
595 amdgpu_bo_unreserve(bo);
596
597 release_out:
598 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
599 unregister_out:
600 if (ret)
601 amdgpu_mn_unregister(bo);
602 out:
603 mutex_unlock(&process_info->lock);
604 return ret;
605 }
606
607 /* Reserving a BO and its page table BOs must happen atomically to
608 * avoid deadlocks. Some operations update multiple VMs at once. Track
609 * all the reservation info in a context structure. Optionally a sync
610 * object can track VM updates.
611 */
612 struct bo_vm_reservation_context {
613 struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
614 unsigned int n_vms; /* Number of VMs reserved */
615 struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
616 struct ww_acquire_ctx ticket; /* Reservation ticket */
617 struct list_head list, duplicates; /* BO lists */
618 struct amdgpu_sync *sync; /* Pointer to sync object */
619 bool reserved; /* Whether BOs are reserved */
620 };
621
622 enum bo_vm_match {
623 BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */
624 BO_VM_MAPPED, /* Match VMs where a BO is mapped */
625 BO_VM_ALL, /* Match all VMs a BO was added to */
626 };
627
628 /**
629 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
630 * @mem: KFD BO structure.
631 * @vm: the VM to reserve.
632 * @ctx: the struct that will be used in unreserve_bo_and_vms().
633 */
634 static int reserve_bo_and_vm(struct kgd_mem *mem,
635 struct amdgpu_vm *vm,
636 struct bo_vm_reservation_context *ctx)
637 {
638 struct amdgpu_bo *bo = mem->bo;
639 int ret;
640
641 WARN_ON(!vm);
642
643 ctx->reserved = false;
644 ctx->n_vms = 1;
645 ctx->sync = &mem->sync;
646
647 INIT_LIST_HEAD(&ctx->list);
648 INIT_LIST_HEAD(&ctx->duplicates);
649
650 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
651 if (!ctx->vm_pd)
652 return -ENOMEM;
653
654 ctx->kfd_bo.priority = 0;
655 ctx->kfd_bo.tv.bo = &bo->tbo;
656 ctx->kfd_bo.tv.num_shared = 1;
657 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
658
659 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
660
661 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
662 false, &ctx->duplicates);
663 if (ret) {
664 pr_err("Failed to reserve buffers in ttm.\n");
665 kfree(ctx->vm_pd);
666 ctx->vm_pd = NULL;
667 return ret;
668 }
669
670 ctx->reserved = true;
671 return 0;
672 }
673
674 /**
675 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
676 * @mem: KFD BO structure.
677 * @vm: the VM to reserve. If NULL, all VMs associated with the BO
678 * are reserved. Otherwise, only the given VM is reserved.
679 * @map_type: the mapping status that will be used to filter the VMs.
680 * @ctx: the struct that will be used in unreserve_bo_and_vms().
681 *
682 * Returns 0 for success, negative for failure.
683 */
684 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
685 struct amdgpu_vm *vm, enum bo_vm_match map_type,
686 struct bo_vm_reservation_context *ctx)
687 {
688 struct amdgpu_bo *bo = mem->bo;
689 struct kfd_bo_va_list *entry;
690 unsigned int i;
691 int ret;
692
693 ctx->reserved = false;
694 ctx->n_vms = 0;
695 ctx->vm_pd = NULL;
696 ctx->sync = &mem->sync;
697
698 INIT_LIST_HEAD(&ctx->list);
699 INIT_LIST_HEAD(&ctx->duplicates);
700
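	/* First pass: count the matching VMs so vm_pd can be sized */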
701 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
702 if ((vm && vm != entry->bo_va->base.vm) ||
703 (entry->is_mapped != map_type
704 && map_type != BO_VM_ALL))
705 continue;
706
707 ctx->n_vms++;
708 }
709
710 if (ctx->n_vms != 0) {
711 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
712 GFP_KERNEL);
713 if (!ctx->vm_pd)
714 return -ENOMEM;
715 }
716
717 ctx->kfd_bo.priority = 0;
718 ctx->kfd_bo.tv.bo = &bo->tbo;
719 ctx->kfd_bo.tv.num_shared = 1;
720 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
721
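	/* Second pass: collect the PD BOs of the matching VMs */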
722 i = 0;
723 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
724 if ((vm && vm != entry->bo_va->base.vm) ||
725 (entry->is_mapped != map_type
726 && map_type != BO_VM_ALL))
727 continue;
728
729 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
730 &ctx->vm_pd[i]);
731 i++;
732 }
733
734 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
735 false, &ctx->duplicates);
736 if (ret) {
737 pr_err("Failed to reserve buffers in ttm.\n");
738 kfree(ctx->vm_pd);
739 ctx->vm_pd = NULL;
740 return ret;
741 }
742
743 ctx->reserved = true;
744 return 0;
745 }
746
747 /**
748 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
749 * @ctx: Reservation context to unreserve
750 * @wait: Optionally wait for a sync object representing pending VM updates
751 * @intr: Whether the wait is interruptible
752 *
753 * Also frees any resources allocated in
754 * reserve_bo_and_(cond_)vm(s). Returns the status from
755 * amdgpu_sync_wait.
756 */
757 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
758 bool wait, bool intr)
759 {
760 int ret = 0;
761
762 if (wait)
763 ret = amdgpu_sync_wait(ctx->sync, intr);
764
765 if (ctx->reserved)
766 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
767 kfree(ctx->vm_pd);
768
769 ctx->sync = NULL;
770
771 ctx->reserved = false;
772 ctx->vm_pd = NULL;
773
774 return ret;
775 }
776
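/* Remove the mapping at entry->va from the VM's page tables and add
 * the resulting page-table update fence to @sync.
 */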
777 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
778 struct kfd_bo_va_list *entry,
779 struct amdgpu_sync *sync)
780 {
781 struct amdgpu_bo_va *bo_va = entry->bo_va;
782 struct amdgpu_vm *vm = bo_va->base.vm;
783
784 amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
785
786 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
787
788 amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
789
790 return 0;
791 }
792
793 static int update_gpuvm_pte(struct amdgpu_device *adev,
794 struct kfd_bo_va_list *entry,
795 struct amdgpu_sync *sync)
796 {
797 int ret;
798 struct amdgpu_bo_va *bo_va = entry->bo_va;
799
800 /* Update the page tables */
801 ret = amdgpu_vm_bo_update(adev, bo_va, false);
802 if (ret) {
803 pr_err("amdgpu_vm_bo_update failed\n");
804 return ret;
805 }
806
807 return amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
808 }
809
810 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
811 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
812 bool no_update_pte)
813 {
814 int ret;
815
816 /* Set virtual address for the allocation */
817 ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
818 amdgpu_bo_size(entry->bo_va->base.bo),
819 entry->pte_flags);
820 if (ret) {
821 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
822 entry->va, ret);
823 return ret;
824 }
825
826 if (no_update_pte)
827 return 0;
828
829 ret = update_gpuvm_pte(adev, entry, sync);
830 if (ret) {
831 pr_err("update_gpuvm_pte() failed\n");
832 goto update_gpuvm_pte_failed;
833 }
834
835 return 0;
836
837 update_gpuvm_pte_failed:
838 unmap_bo_from_gpuvm(adev, entry, sync);
839 return ret;
840 }
841
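/* Build a single-entry sg table wrapping an already-mapped doorbell
 * or MMIO page, so it can be exposed as a ttm_bo_type_sg BO.
 */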
842 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
843 {
844 struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
845
846 if (!sg)
847 return NULL;
848 if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
849 kfree(sg);
850 return NULL;
851 }
852 sg->sgl->dma_address = addr;
853 sg->sgl->length = size;
854 #ifdef CONFIG_NEED_SG_DMA_LENGTH
855 sg->sgl->dma_length = size;
856 #endif
857 return sg;
858 }
859
860 static int process_validate_vms(struct amdkfd_process_info *process_info)
861 {
862 struct amdgpu_vm *peer_vm;
863 int ret;
864
865 list_for_each_entry(peer_vm, &process_info->vm_list_head,
866 vm_list_node) {
867 ret = vm_validate_pt_pd_bos(peer_vm);
868 if (ret)
869 return ret;
870 }
871
872 return 0;
873 }
874
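/* Add the fences of all page-directory reservation objects to @sync,
 * skipping fences owned by KFD itself (AMDGPU_SYNC_NE_OWNER), so we
 * wait for other users' VM updates but not our own eviction fences.
 */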
875 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
876 struct amdgpu_sync *sync)
877 {
878 struct amdgpu_vm *peer_vm;
879 int ret;
880
881 list_for_each_entry(peer_vm, &process_info->vm_list_head,
882 vm_list_node) {
883 struct amdgpu_bo *pd = peer_vm->root.base.bo;
884
885 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
886 AMDGPU_SYNC_NE_OWNER,
887 AMDGPU_FENCE_OWNER_KFD);
888 if (ret)
889 return ret;
890 }
891
892 return 0;
893 }
894
895 static int process_update_pds(struct amdkfd_process_info *process_info,
896 struct amdgpu_sync *sync)
897 {
898 struct amdgpu_vm *peer_vm;
899 int ret;
900
901 list_for_each_entry(peer_vm, &process_info->vm_list_head,
902 vm_list_node) {
903 ret = vm_update_pds(peer_vm, sync);
904 if (ret)
905 return ret;
906 }
907
908 return 0;
909 }
910
911 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
912 struct dma_fence **ef)
913 {
914 struct amdkfd_process_info *info = NULL;
915 int ret;
916
917 if (!*process_info) {
918 info = kzalloc(sizeof(*info), GFP_KERNEL);
919 if (!info)
920 return -ENOMEM;
921
922 mutex_init(&info->lock);
923 INIT_LIST_HEAD(&info->vm_list_head);
924 INIT_LIST_HEAD(&info->kfd_bo_list);
925 INIT_LIST_HEAD(&info->userptr_valid_list);
926 INIT_LIST_HEAD(&info->userptr_inval_list);
927
928 info->eviction_fence =
929 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
930 current->mm);
931 if (!info->eviction_fence) {
932 pr_err("Failed to create eviction fence\n");
933 ret = -ENOMEM;
934 goto create_evict_fence_fail;
935 }
936
937 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
938 atomic_set(&info->evicted_bos, 0);
939 INIT_DELAYED_WORK(&info->restore_userptr_work,
940 amdgpu_amdkfd_restore_userptr_worker);
941
942 *process_info = info;
943 *ef = dma_fence_get(&info->eviction_fence->base);
944 }
945
946 vm->process_info = *process_info;
947
948 /* Validate page directory and attach eviction fence */
949 ret = amdgpu_bo_reserve(vm->root.base.bo, true);
950 if (ret)
951 goto reserve_pd_fail;
952 ret = vm_validate_pt_pd_bos(vm);
953 if (ret) {
954 pr_err("validate_pt_pd_bos() failed\n");
955 goto validate_pd_fail;
956 }
957 ret = amdgpu_bo_sync_wait(vm->root.base.bo,
958 AMDGPU_FENCE_OWNER_KFD, false);
959 if (ret)
960 goto wait_pd_fail;
961 ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
962 if (ret)
963 goto reserve_shared_fail;
964 amdgpu_bo_fence(vm->root.base.bo,
965 &vm->process_info->eviction_fence->base, true);
966 amdgpu_bo_unreserve(vm->root.base.bo);
967
968 /* Update process info */
969 mutex_lock(&vm->process_info->lock);
970 list_add_tail(&vm->vm_list_node,
971 &(vm->process_info->vm_list_head));
972 vm->process_info->n_vms++;
973 mutex_unlock(&vm->process_info->lock);
974
975 return 0;
976
977 reserve_shared_fail:
978 wait_pd_fail:
979 validate_pd_fail:
980 amdgpu_bo_unreserve(vm->root.base.bo);
981 reserve_pd_fail:
982 vm->process_info = NULL;
983 if (info) {
984 /* Two fence references: one in info and one in *ef */
985 dma_fence_put(&info->eviction_fence->base);
986 dma_fence_put(*ef);
987 *ef = NULL;
988 *process_info = NULL;
989 put_pid(info->pid);
990 create_evict_fence_fail:
991 mutex_destroy(&info->lock);
992 kfree(info);
993 }
994 return ret;
995 }
996
997 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
998 void **vm, void **process_info,
999 struct dma_fence **ef)
1000 {
1001 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1002 struct amdgpu_vm *new_vm;
1003 int ret;
1004
1005 new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
1006 if (!new_vm)
1007 return -ENOMEM;
1008
1009 /* Initialize AMDGPU part of the VM */
1010 ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
1011 if (ret) {
1012 pr_err("Failed init vm ret %d\n", ret);
1013 goto amdgpu_vm_init_fail;
1014 }
1015
1016 /* Initialize KFD part of the VM and process info */
1017 ret = init_kfd_vm(new_vm, process_info, ef);
1018 if (ret)
1019 goto init_kfd_vm_fail;
1020
1021 *vm = (void *) new_vm;
1022
1023 return 0;
1024
1025 init_kfd_vm_fail:
1026 amdgpu_vm_fini(adev, new_vm);
1027 amdgpu_vm_init_fail:
1028 kfree(new_vm);
1029 return ret;
1030 }
1031
1032 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1033 struct file *filp, unsigned int pasid,
1034 void **vm, void **process_info,
1035 struct dma_fence **ef)
1036 {
1037 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1038 struct drm_file *drm_priv = filp->private_data;
1039 struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1040 struct amdgpu_vm *avm = &drv_priv->vm;
1041 int ret;
1042
1043 /* Already a compute VM? */
1044 if (avm->process_info)
1045 return -EINVAL;
1046
1047 /* Convert VM into a compute VM */
1048 ret = amdgpu_vm_make_compute(adev, avm, pasid);
1049 if (ret)
1050 return ret;
1051
1052 /* Initialize KFD part of the VM and process info */
1053 ret = init_kfd_vm(avm, process_info, ef);
1054 if (ret)
1055 return ret;
1056
1057 *vm = (void *)avm;
1058
1059 return 0;
1060 }
1061
1062 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1063 struct amdgpu_vm *vm)
1064 {
1065 struct amdkfd_process_info *process_info = vm->process_info;
1066 struct amdgpu_bo *pd = vm->root.base.bo;
1067
1068 if (!process_info)
1069 return;
1070
1071 /* Release eviction fence from PD */
1072 amdgpu_bo_reserve(pd, false);
1073 amdgpu_bo_fence(pd, NULL, false);
1074 amdgpu_bo_unreserve(pd);
1075
1076 /* Update process info */
1077 mutex_lock(&process_info->lock);
1078 process_info->n_vms--;
1079 list_del(&vm->vm_list_node);
1080 mutex_unlock(&process_info->lock);
1081
1082 vm->process_info = NULL;
1083
1084 /* Release per-process resources when last compute VM is destroyed */
1085 if (!process_info->n_vms) {
1086 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1087 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1088 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1089
1090 dma_fence_put(&process_info->eviction_fence->base);
1091 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1092 put_pid(process_info->pid);
1093 mutex_destroy(&process_info->lock);
1094 kfree(process_info);
1095 }
1096 }
1097
1098 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1099 {
1100 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1101 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1102
1103 if (WARN_ON(!kgd || !vm))
1104 return;
1105
1106 pr_debug("Destroying process vm %p\n", vm);
1107
1108 /* Release the VM context */
1109 amdgpu_vm_fini(adev, avm);
1110 kfree(vm);
1111 }
1112
1113 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1114 {
1115 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1116 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1117
1118 if (WARN_ON(!kgd || !vm))
1119 return;
1120
1121 pr_debug("Releasing process vm %p\n", vm);
1122
1123 /* The original pasid of the amdgpu vm was already released
1124 * when the amdgpu vm was converted to a compute vm. The
1125 * current pasid is managed by KFD and will be released on
1126 * KFD process destruction. Set the amdgpu pasid to 0 to
1127 * avoid a duplicate release.
1128 */
1129 amdgpu_vm_release_compute(adev, avm);
1130 }
1131
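/* Return the page directory address in the format the ASIC expects:
 * pre-Vega10 ASICs take a GPU page frame number, newer ASICs take the
 * value produced by amdgpu_gmc_pd_addr().
 */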
1132 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1133 {
1134 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1135 struct amdgpu_bo *pd = avm->root.base.bo;
1136 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1137
1138 if (adev->asic_type < CHIP_VEGA10)
1139 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1140 return avm->pd_phys_addr;
1141 }
1142
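/* Allocate memory for a KFD process: choose the TTM domain from the
 * KFD allocation @flags (VRAM, GTT, userptr, or an SG BO wrapping a
 * doorbell/MMIO page), charge the memory-limit accounting, create
 * the BO, and for userptrs register the MMU notifier and fetch the
 * initial user pages.
 */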
1143 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1144 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1145 void *vm, struct kgd_mem **mem,
1146 uint64_t *offset, uint32_t flags)
1147 {
1148 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1149 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1150 enum ttm_bo_type bo_type = ttm_bo_type_device;
1151 struct sg_table *sg = NULL;
1152 uint64_t user_addr = 0;
1153 struct amdgpu_bo *bo;
1154 struct amdgpu_bo_param bp;
1155 u32 domain, alloc_domain;
1156 u64 alloc_flags;
1157 int ret;
1158
1159 /*
1160 * Check on which domain to allocate BO
1161 */
1162 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1163 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1164 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1165 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1166 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1167 AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1168 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1169 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1170 alloc_flags = 0;
1171 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1172 domain = AMDGPU_GEM_DOMAIN_GTT;
1173 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1174 alloc_flags = 0;
1175 if (!offset || !*offset)
1176 return -EINVAL;
1177 user_addr = untagged_addr(*offset);
1178 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1179 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1180 domain = AMDGPU_GEM_DOMAIN_GTT;
1181 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1182 bo_type = ttm_bo_type_sg;
1183 alloc_flags = 0;
1184 if (size > UINT_MAX)
1185 return -EINVAL;
1186 sg = create_doorbell_sg(*offset, size);
1187 if (!sg)
1188 return -ENOMEM;
1189 } else {
1190 return -EINVAL;
1191 }
1192
1193 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1194 if (!*mem) {
1195 ret = -ENOMEM;
1196 goto err;
1197 }
1198 INIT_LIST_HEAD(&(*mem)->bo_va_list);
1199 mutex_init(&(*mem)->lock);
1200 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1201
1202 /* Workaround for AQL queue wraparound bug. Map the same
1203 * memory twice. That means we only actually allocate half
1204 * the memory.
1205 */
1206 if ((*mem)->aql_queue)
1207 size = size >> 1;
1208
1209 (*mem)->alloc_flags = flags;
1210
1211 amdgpu_sync_create(&(*mem)->sync);
1212
1213 ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1214 if (ret) {
1215 pr_debug("Insufficient system memory\n");
1216 goto err_reserve_limit;
1217 }
1218
1219 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1220 va, size, domain_string(alloc_domain));
1221
1222 memset(&bp, 0, sizeof(bp));
1223 bp.size = size;
1224 bp.byte_align = 1;
1225 bp.domain = alloc_domain;
1226 bp.flags = alloc_flags;
1227 bp.type = bo_type;
1228 bp.resv = NULL;
1229 ret = amdgpu_bo_create(adev, &bp, &bo);
1230 if (ret) {
1231 pr_debug("Failed to create BO on domain %s. ret %d\n",
1232 domain_string(alloc_domain), ret);
1233 goto err_bo_create;
1234 }
1235 if (bo_type == ttm_bo_type_sg) {
1236 bo->tbo.sg = sg;
1237 bo->tbo.ttm->sg = sg;
1238 }
1239 bo->kfd_bo = *mem;
1240 (*mem)->bo = bo;
1241 if (user_addr)
1242 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1243
1244 (*mem)->va = va;
1245 (*mem)->domain = domain;
1246 (*mem)->mapped_to_gpu_memory = 0;
1247 (*mem)->process_info = avm->process_info;
1248 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1249
1250 if (user_addr) {
1251 ret = init_user_pages(*mem, user_addr);
1252 if (ret)
1253 goto allocate_init_user_pages_failed;
1254 }
1255
1256 if (offset)
1257 *offset = amdgpu_bo_mmap_offset(bo);
1258
1259 return 0;
1260
1261 allocate_init_user_pages_failed:
1262 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1263 amdgpu_bo_unref(&bo);
1264 /* Don't unreserve system mem limit twice */
1265 goto err_reserve_limit;
1266 err_bo_create:
1267 unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1268 err_reserve_limit:
1269 mutex_destroy(&(*mem)->lock);
1270 kfree(*mem);
1271 err:
1272 if (sg) {
1273 sg_free_table(sg);
1274 kfree(sg);
1275 }
1276 return ret;
1277 }
1278
1279 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1280 struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
1281 {
1282 struct amdkfd_process_info *process_info = mem->process_info;
1283 unsigned long bo_size = mem->bo->tbo.mem.size;
1284 struct kfd_bo_va_list *entry, *tmp;
1285 struct bo_vm_reservation_context ctx;
1286 struct ttm_validate_buffer *bo_list_entry;
1287 unsigned int mapped_to_gpu_memory;
1288 int ret;
1289 bool is_imported = false;
1290
1291 mutex_lock(&mem->lock);
1292 mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1293 is_imported = mem->is_imported;
1294 mutex_unlock(&mem->lock);
1295 /* lock is not needed after this, since mem is unused and will
1296 * be freed anyway
1297 */
1298
1299 if (mapped_to_gpu_memory > 0) {
1300 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1301 mem->va, bo_size);
1302 return -EBUSY;
1303 }
1304
1305 /* Make sure restore workers don't access the BO any more */
1306 bo_list_entry = &mem->validate_list;
1307 mutex_lock(&process_info->lock);
1308 list_del(&bo_list_entry->head);
1309 mutex_unlock(&process_info->lock);
1310
1311 /* No more MMU notifiers */
1312 amdgpu_mn_unregister(mem->bo);
1313
1314 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1315 if (unlikely(ret))
1316 return ret;
1317
1318 /* The eviction fence should be removed by the last unmap.
1319 * TODO: Log an error condition if the bo still has the eviction fence
1320 * attached
1321 */
1322 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1323 process_info->eviction_fence);
1324 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1325 mem->va + bo_size * (1 + mem->aql_queue));
1326
1327 /* Remove from VM internal data structures */
1328 list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1329 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1330 entry, bo_size);
1331
1332 ret = unreserve_bo_and_vms(&ctx, false, false);
1333
1334 /* Free the sync object */
1335 amdgpu_sync_free(&mem->sync);
1336
1337 /* If the SG is not NULL, it's one we created for a doorbell or mmio
1338 * remap BO. We need to free it.
1339 */
1340 if (mem->bo->tbo.sg) {
1341 sg_free_table(mem->bo->tbo.sg);
1342 kfree(mem->bo->tbo.sg);
1343 }
1344
1345 /* Update the size of the BO being freed if it was allocated from
1346 * VRAM and is not imported.
1347 */
1348 if (size) {
1349 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1350 (!is_imported))
1351 *size = bo_size;
1352 else
1353 *size = 0;
1354 }
1355
1356 /* Free the BO */
1357 drm_gem_object_put_unlocked(&mem->bo->tbo.base);
1358 mutex_destroy(&mem->lock);
1359 kfree(mem);
1360
1361 return ret;
1362 }
1363
1364 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1365 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1366 {
1367 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1368 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1369 int ret;
1370 struct amdgpu_bo *bo;
1371 uint32_t domain;
1372 struct kfd_bo_va_list *entry;
1373 struct bo_vm_reservation_context ctx;
1374 struct kfd_bo_va_list *bo_va_entry = NULL;
1375 struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1376 unsigned long bo_size;
1377 bool is_invalid_userptr = false;
1378
1379 bo = mem->bo;
1380 if (!bo) {
1381 pr_err("Invalid BO when mapping memory to GPU\n");
1382 return -EINVAL;
1383 }
1384
1385 /* Make sure restore is not running concurrently. Since we
1386 * don't map invalid userptr BOs, we rely on the next restore
1387 * worker to do the mapping
1388 */
1389 mutex_lock(&mem->process_info->lock);
1390
1391 /* Take the mmap lock. If we find an invalid userptr BO, we can be
1392 * sure that the MMU notifier is no longer running
1393 * concurrently and the queues are actually stopped
1394 */
1395 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1396 mmap_write_lock(current->mm);
1397 is_invalid_userptr = atomic_read(&mem->invalid);
1398 mmap_write_unlock(current->mm);
1399 }
1400
1401 mutex_lock(&mem->lock);
1402
1403 domain = mem->domain;
1404 bo_size = bo->tbo.mem.size;
1405
1406 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1407 mem->va,
1408 mem->va + bo_size * (1 + mem->aql_queue),
1409 vm, domain_string(domain));
1410
1411 ret = reserve_bo_and_vm(mem, vm, &ctx);
1412 if (unlikely(ret))
1413 goto out;
1414
1415 /* Userptr can be marked as "not invalid", but not actually be
1416 * validated yet (still in the system domain). In that case
1417 * the queues are still stopped and we can leave mapping for
1418 * the next restore worker
1419 */
1420 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1421 bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1422 is_invalid_userptr = true;
1423
1424 if (check_if_add_bo_to_vm(avm, mem)) {
1425 ret = add_bo_to_vm(adev, mem, avm, false,
1426 &bo_va_entry);
1427 if (ret)
1428 goto add_bo_to_vm_failed;
1429 if (mem->aql_queue) {
1430 ret = add_bo_to_vm(adev, mem, avm,
1431 true, &bo_va_entry_aql);
1432 if (ret)
1433 goto add_bo_to_vm_failed_aql;
1434 }
1435 } else {
1436 ret = vm_validate_pt_pd_bos(avm);
1437 if (unlikely(ret))
1438 goto add_bo_to_vm_failed;
1439 }
1440
1441 if (mem->mapped_to_gpu_memory == 0 &&
1442 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1443 /* Validate BO only once. The eviction fence gets added to BO
1444 * the first time it is mapped. Validate will wait for all
1445 * background evictions to complete.
1446 */
1447 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1448 if (ret) {
1449 pr_debug("Validate failed\n");
1450 goto map_bo_to_gpuvm_failed;
1451 }
1452 }
1453
1454 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1455 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1456 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1457 entry->va, entry->va + bo_size,
1458 entry);
1459
1460 ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1461 is_invalid_userptr);
1462 if (ret) {
1463 pr_err("Failed to map bo to gpuvm\n");
1464 goto map_bo_to_gpuvm_failed;
1465 }
1466
1467 ret = vm_update_pds(vm, ctx.sync);
1468 if (ret) {
1469 pr_err("Failed to update page directories\n");
1470 goto map_bo_to_gpuvm_failed;
1471 }
1472
1473 entry->is_mapped = true;
1474 mem->mapped_to_gpu_memory++;
1475 pr_debug("\t INC mapping count %d\n",
1476 mem->mapped_to_gpu_memory);
1477 }
1478 }
1479
1480 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1481 amdgpu_bo_fence(bo,
1482 &avm->process_info->eviction_fence->base,
1483 true);
1484 ret = unreserve_bo_and_vms(&ctx, false, false);
1485
1486 goto out;
1487
1488 map_bo_to_gpuvm_failed:
1489 if (bo_va_entry_aql)
1490 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1491 add_bo_to_vm_failed_aql:
1492 if (bo_va_entry)
1493 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1494 add_bo_to_vm_failed:
1495 unreserve_bo_and_vms(&ctx, false, false);
1496 out:
1497 mutex_unlock(&mem->process_info->lock);
1498 mutex_unlock(&mem->lock);
1499 return ret;
1500 }
1501
1502 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1503 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1504 {
1505 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1506 struct amdkfd_process_info *process_info =
1507 ((struct amdgpu_vm *)vm)->process_info;
1508 unsigned long bo_size = mem->bo->tbo.mem.size;
1509 struct kfd_bo_va_list *entry;
1510 struct bo_vm_reservation_context ctx;
1511 int ret;
1512
1513 mutex_lock(&mem->lock);
1514
1515 ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1516 if (unlikely(ret))
1517 goto out;
1518 /* If no VMs were reserved, it means the BO wasn't actually mapped */
1519 if (ctx.n_vms == 0) {
1520 ret = -EINVAL;
1521 goto unreserve_out;
1522 }
1523
1524 ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1525 if (unlikely(ret))
1526 goto unreserve_out;
1527
1528 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1529 mem->va,
1530 mem->va + bo_size * (1 + mem->aql_queue),
1531 vm);
1532
1533 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1534 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1535 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1536 entry->va,
1537 entry->va + bo_size,
1538 entry);
1539
1540 ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1541 if (ret == 0) {
1542 entry->is_mapped = false;
1543 } else {
1544 pr_err("failed to unmap VA 0x%llx\n",
1545 mem->va);
1546 goto unreserve_out;
1547 }
1548
1549 mem->mapped_to_gpu_memory--;
1550 pr_debug("\t DEC mapping count %d\n",
1551 mem->mapped_to_gpu_memory);
1552 }
1553 }
1554
1555 /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1556 * required.
1557 */
1558 if (mem->mapped_to_gpu_memory == 0 &&
1559 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1560 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1561 process_info->eviction_fence);
1562
1563 unreserve_out:
1564 unreserve_bo_and_vms(&ctx, false, false);
1565 out:
1566 mutex_unlock(&mem->lock);
1567 return ret;
1568 }
1569
1570 int amdgpu_amdkfd_gpuvm_sync_memory(
1571 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1572 {
1573 struct amdgpu_sync sync;
1574 int ret;
1575
1576 amdgpu_sync_create(&sync);
1577
1578 mutex_lock(&mem->lock);
1579 amdgpu_sync_clone(&mem->sync, &sync);
1580 mutex_unlock(&mem->lock);
1581
1582 ret = amdgpu_sync_wait(&sync, intr);
1583 amdgpu_sync_free(&sync);
1584 return ret;
1585 }
1586
1587 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1588 struct kgd_mem *mem, void **kptr, uint64_t *size)
1589 {
1590 int ret;
1591 struct amdgpu_bo *bo = mem->bo;
1592
1593 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1594 pr_err("userptr can't be mapped to kernel\n");
1595 return -EINVAL;
1596 }
1597
1598 /* Remove kgd_mem from the kfd_bo_list to avoid re-validating
1599 * this BO during restore after eviction.
1600 */
1601 mutex_lock(&mem->process_info->lock);
1602
1603 ret = amdgpu_bo_reserve(bo, true);
1604 if (ret) {
1605 pr_err("Failed to reserve bo. ret %d\n", ret);
1606 goto bo_reserve_failed;
1607 }
1608
1609 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1610 if (ret) {
1611 pr_err("Failed to pin bo. ret %d\n", ret);
1612 goto pin_failed;
1613 }
1614
1615 ret = amdgpu_bo_kmap(bo, kptr);
1616 if (ret) {
1617 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1618 goto kmap_failed;
1619 }
1620
1621 amdgpu_amdkfd_remove_eviction_fence(
1622 bo, mem->process_info->eviction_fence);
1623 list_del_init(&mem->validate_list.head);
1624
1625 if (size)
1626 *size = amdgpu_bo_size(bo);
1627
1628 amdgpu_bo_unreserve(bo);
1629
1630 mutex_unlock(&mem->process_info->lock);
1631 return 0;
1632
1633 kmap_failed:
1634 amdgpu_bo_unpin(bo);
1635 pin_failed:
1636 amdgpu_bo_unreserve(bo);
1637 bo_reserve_failed:
1638 mutex_unlock(&mem->process_info->lock);
1639
1640 return ret;
1641 }
1642
1643 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1644 struct kfd_vm_fault_info *mem)
1645 {
1646 struct amdgpu_device *adev;
1647
1648 adev = (struct amdgpu_device *)kgd;
1649 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1650 *mem = *adev->gmc.vm_fault_info;
1651 mb();
1652 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1653 }
1654 return 0;
1655 }
1656
1657 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1658 struct dma_buf *dma_buf,
1659 uint64_t va, void *vm,
1660 struct kgd_mem **mem, uint64_t *size,
1661 uint64_t *mmap_offset)
1662 {
1663 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1664 struct drm_gem_object *obj;
1665 struct amdgpu_bo *bo;
1666 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1667
1668 if (dma_buf->ops != &amdgpu_dmabuf_ops)
1669 /* Can't handle non-graphics buffers */
1670 return -EINVAL;
1671
1672 obj = dma_buf->priv;
1673 if (obj->dev->dev_private != adev)
1674 /* Can't handle buffers from other devices */
1675 return -EINVAL;
1676
1677 bo = gem_to_amdgpu_bo(obj);
1678 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1679 AMDGPU_GEM_DOMAIN_GTT)))
1680 /* Only VRAM and GTT BOs are supported */
1681 return -EINVAL;
1682
1683 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1684 if (!*mem)
1685 return -ENOMEM;
1686
1687 if (size)
1688 *size = amdgpu_bo_size(bo);
1689
1690 if (mmap_offset)
1691 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1692
1693 INIT_LIST_HEAD(&(*mem)->bo_va_list);
1694 mutex_init(&(*mem)->lock);
1695
1696 (*mem)->alloc_flags =
1697 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1698 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1699 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1700 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1701
1702 drm_gem_object_get(&bo->tbo.base);
1703 (*mem)->bo = bo;
1704 (*mem)->va = va;
1705 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1706 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1707 (*mem)->mapped_to_gpu_memory = 0;
1708 (*mem)->process_info = avm->process_info;
1709 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1710 amdgpu_sync_create(&(*mem)->sync);
1711 (*mem)->is_imported = true;
1712
1713 return 0;
1714 }
1715
1716 /* Evict a userptr BO by stopping the queues if necessary
1717 *
1718 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1719 * cannot do any memory allocations, and cannot take any locks that
1720 * are held elsewhere while allocating memory. Therefore this is as
1721 * simple as possible, using atomic counters.
1722 *
1723 * It doesn't do anything to the BO itself. The real work happens in
1724 * restore, where we get updated page addresses. This function only
1725 * ensures that GPU access to the BO is stopped.
1726 */
1727 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1728 struct mm_struct *mm)
1729 {
1730 struct amdkfd_process_info *process_info = mem->process_info;
1731 int evicted_bos;
1732 int r = 0;
1733
1734 atomic_inc(&mem->invalid);
1735 evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1736 if (evicted_bos == 1) {
1737 /* First eviction, stop the queues */
1738 r = kgd2kfd_quiesce_mm(mm);
1739 if (r)
1740 pr_err("Failed to quiesce KFD\n");
1741 schedule_delayed_work(&process_info->restore_userptr_work,
1742 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1743 }
1744
1745 return r;
1746 }
1747
1748 /* Update invalid userptr BOs
1749 *
1750 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1751 * userptr_inval_list and updates user pages for all BOs that have
1752 * been invalidated since their last update.
1753 */
1754 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1755 struct mm_struct *mm)
1756 {
1757 struct kgd_mem *mem, *tmp_mem;
1758 struct amdgpu_bo *bo;
1759 struct ttm_operation_ctx ctx = { false, false };
1760 int invalid, ret;
1761
1762 /* Move all invalidated BOs to the userptr_inval_list and
1763 * release their user pages by migration to the CPU domain
1764 */
1765 list_for_each_entry_safe(mem, tmp_mem,
1766 &process_info->userptr_valid_list,
1767 validate_list.head) {
1768 if (!atomic_read(&mem->invalid))
1769 continue; /* BO is still valid */
1770
1771 bo = mem->bo;
1772
1773 if (amdgpu_bo_reserve(bo, true))
1774 return -EAGAIN;
1775 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1776 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1777 amdgpu_bo_unreserve(bo);
1778 if (ret) {
1779 pr_err("%s: Failed to invalidate userptr BO\n",
1780 __func__);
1781 return -EAGAIN;
1782 }
1783
1784 list_move_tail(&mem->validate_list.head,
1785 &process_info->userptr_inval_list);
1786 }
1787
1788 if (list_empty(&process_info->userptr_inval_list))
1789 return 0; /* All evicted userptr BOs were freed */
1790
1791 /* Go through userptr_inval_list and update any invalid user_pages */
1792 list_for_each_entry(mem, &process_info->userptr_inval_list,
1793 validate_list.head) {
1794 invalid = atomic_read(&mem->invalid);
1795 if (!invalid)
1796 /* BO hasn't been invalidated since the last
1797 * revalidation attempt. Keep its BO list.
1798 */
1799 continue;
1800
1801 bo = mem->bo;
1802
1803 /* Get updated user pages */
1804 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1805 if (ret) {
1806 pr_debug("%s: Failed to get user pages: %d\n",
1807 __func__, ret);
1808
1809 /* Return error -EBUSY or -ENOMEM, retry restore */
1810 return ret;
1811 }
1812
1813 /*
1814 * FIXME: Cannot ignore the return code, must hold
1815 * notifier_lock
1816 */
1817 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1818
1819 /* Mark the BO as valid unless it was invalidated
1820 * again concurrently.
1821 */
1822 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1823 return -EAGAIN;
1824 }
1825
1826 return 0;
1827 }
1828
1829 /* Validate invalid userptr BOs
1830 *
1831 * Validates BOs on the userptr_inval_list, and moves them back to the
1832 * userptr_valid_list. Also updates GPUVM page tables with new page
1833 * addresses and waits for the page table updates to complete.
1834 */
1835 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1836 {
1837 struct amdgpu_bo_list_entry *pd_bo_list_entries;
1838 struct list_head resv_list, duplicates;
1839 struct ww_acquire_ctx ticket;
1840 struct amdgpu_sync sync;
1841
1842 struct amdgpu_vm *peer_vm;
1843 struct kgd_mem *mem, *tmp_mem;
1844 struct amdgpu_bo *bo;
1845 struct ttm_operation_ctx ctx = { false, false };
1846 int i, ret;
1847
1848 pd_bo_list_entries = kcalloc(process_info->n_vms,
1849 sizeof(struct amdgpu_bo_list_entry),
1850 GFP_KERNEL);
1851 if (!pd_bo_list_entries) {
1852 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1853 ret = -ENOMEM;
1854 goto out_no_mem;
1855 }
1856
1857 INIT_LIST_HEAD(&resv_list);
1858 INIT_LIST_HEAD(&duplicates);
1859
1860 /* Get all the page directory BOs that need to be reserved */
1861 i = 0;
1862 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1863 vm_list_node)
1864 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1865 &pd_bo_list_entries[i++]);
1866 /* Add the userptr_inval_list entries to resv_list */
1867 list_for_each_entry(mem, &process_info->userptr_inval_list,
1868 validate_list.head) {
1869 list_add_tail(&mem->resv_list.head, &resv_list);
1870 mem->resv_list.bo = mem->validate_list.bo;
1871 mem->resv_list.num_shared = mem->validate_list.num_shared;
1872 }
1873
1874 /* Reserve all BOs and page tables for validation */
1875 ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1876 WARN(!list_empty(&duplicates), "Duplicates should be empty");
1877 if (ret)
1878 goto out_free;
1879
1880 amdgpu_sync_create(&sync);
1881
1882 ret = process_validate_vms(process_info);
1883 if (ret)
1884 goto unreserve_out;
1885
1886 /* Validate BOs and update GPUVM page tables */
1887 list_for_each_entry_safe(mem, tmp_mem,
1888 &process_info->userptr_inval_list,
1889 validate_list.head) {
1890 struct kfd_bo_va_list *bo_va_entry;
1891
1892 bo = mem->bo;
1893
1894 /* Validate the BO if we got user pages */
1895 if (bo->tbo.ttm->pages[0]) {
1896 amdgpu_bo_placement_from_domain(bo, mem->domain);
1897 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1898 if (ret) {
1899 pr_err("%s: failed to validate BO\n", __func__);
1900 goto unreserve_out;
1901 }
1902 }
1903
1904 list_move_tail(&mem->validate_list.head,
1905 &process_info->userptr_valid_list);
1906
1907 /* Update mapping. If the BO was not validated
1908 * (because we couldn't get user pages), this will
1909 * clear the page table entries, which will result in
1910 * VM faults if the GPU tries to access the invalid
1911 * memory.
1912 */
1913 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1914 if (!bo_va_entry->is_mapped)
1915 continue;
1916
1917 ret = update_gpuvm_pte((struct amdgpu_device *)
1918 bo_va_entry->kgd_dev,
1919 bo_va_entry, &sync);
1920 if (ret) {
1921 pr_err("%s: update PTE failed\n", __func__);
1922 /* make sure this gets validated again */
1923 atomic_inc(&mem->invalid);
1924 goto unreserve_out;
1925 }
1926 }
1927 }
1928
1929 /* Update page directories */
1930 ret = process_update_pds(process_info, &sync);
1931
1932 unreserve_out:
1933 ttm_eu_backoff_reservation(&ticket, &resv_list);
1934 amdgpu_sync_wait(&sync, false);
1935 amdgpu_sync_free(&sync);
1936 out_free:
1937 kfree(pd_bo_list_entries);
1938 out_no_mem:
1939
1940 return ret;
1941 }
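
/*
 * The reservation discipline above, reduced to a hedged skeleton (the
 * helper do_validation() is made up): every BO on the list is ww-locked
 * under a single acquire ticket, and the backoff must run on every exit
 * path once the reservation has succeeded:
 *
 *	struct ww_acquire_ctx ticket;
 *	struct list_head resv, dups;
 *	int ret;
 *
 *	INIT_LIST_HEAD(&resv);
 *	INIT_LIST_HEAD(&dups);
 *	ret = ttm_eu_reserve_buffers(&ticket, &resv, false, &dups);
 *	if (!ret) {
 *		ret = do_validation();
 *		ttm_eu_backoff_reservation(&ticket, &resv);
 *	}
 */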
1942
1943 /* Worker callback to restore evicted userptr BOs
1944 *
1945 * Tries to update and validate all userptr BOs. If successful and no
1946 * concurrent evictions happened, the queues are restarted. Otherwise,
1947 * another attempt is rescheduled for later.
1948 */
1949 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1950 {
1951 struct delayed_work *dwork = to_delayed_work(work);
1952 struct amdkfd_process_info *process_info =
1953 container_of(dwork, struct amdkfd_process_info,
1954 restore_userptr_work);
1955 struct task_struct *usertask;
1956 struct mm_struct *mm;
1957 int evicted_bos;
1958
1959 evicted_bos = atomic_read(&process_info->evicted_bos);
1960 if (!evicted_bos)
1961 return;
1962
1963 /* Reference task and mm in case of concurrent process termination */
1964 usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1965 if (!usertask)
1966 return;
1967 mm = get_task_mm(usertask);
1968 if (!mm) {
1969 put_task_struct(usertask);
1970 return;
1971 }
1972
1973 mutex_lock(&process_info->lock);
1974
1975 if (update_invalid_user_pages(process_info, mm))
1976 goto unlock_out;
1977 /* userptr_inval_list can be empty if all evicted userptr BOs
1978 * have been freed. In that case there is nothing to validate
1979 * and we can just restart the queues.
1980 */
1981 if (!list_empty(&process_info->userptr_inval_list)) {
1982 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1983 goto unlock_out; /* Concurrent eviction, try again */
1984
1985 if (validate_invalid_user_pages(process_info))
1986 goto unlock_out;
1987 }
1988 /* Final check for concurrent eviction and atomic update. If
1989 * another eviction happens after a successful update, it will
1990 * be the first eviction that calls quiesce_mm. The eviction
1991 * reference counting inside KFD will handle this case.
1992 */
1993 if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1994 evicted_bos)
1995 goto unlock_out;
1996 evicted_bos = 0;
1997 if (kgd2kfd_resume_mm(mm)) {
1998 pr_err("%s: Failed to resume KFD\n", __func__);
1999 /* No recovery from this failure. Probably the CP is
2000 * hanging. No point trying again.
2001 */
2002 }
2003
2004 unlock_out:
2005 mutex_unlock(&process_info->lock);
2006 mmput(mm);
2007 put_task_struct(usertask);
2008
2009 /* If validation failed, reschedule another attempt */
2010 if (evicted_bos)
2011 schedule_delayed_work(&process_info->restore_userptr_work,
2012 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2013 }
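
/*
 * Generic shape of the self-rescheduling worker above; struct my_ctx,
 * my_work and my_try_restore() are placeholders, not driver symbols:
 *
 *	static void my_worker(struct work_struct *work)
 *	{
 *		struct my_ctx *ctx = container_of(to_delayed_work(work),
 *						  struct my_ctx, my_work);
 *
 *		if (my_try_restore(ctx))
 *			schedule_delayed_work(&ctx->my_work,
 *					      msecs_to_jiffies(1));
 *	}
 */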
2014
2015 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2016 * KFD process identified by process_info
2017 *
2018 * @process_info: amdkfd_process_info of the KFD process
2019 *
2020 * After memory eviction, the restore thread calls this function. The function
2021 * should be called while the process is still valid. BO restore involves:
2022 *
2023 * 1. Release the old eviction fence and create a new one (sketched after this function)
2024 * 2. Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2025 * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2026 * BOs that need to be reserved.
2027 * 4. Reserve all the BOs
2028 * 5. Validate the PD and PT BOs.
2029 * 6. Validate all KFD BOs using kfd_bo_list, map them, and add the new fence
2030 * 7. Add the fence to all PD and PT BOs.
2031 * 8. Unreserve all BOs
2032 */
2033 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2034 {
2035 struct amdgpu_bo_list_entry *pd_bo_list;
2036 struct amdkfd_process_info *process_info = info;
2037 struct amdgpu_vm *peer_vm;
2038 struct kgd_mem *mem;
2039 struct bo_vm_reservation_context ctx;
2040 struct amdgpu_amdkfd_fence *new_fence;
2041 int ret = 0, i;
2042 struct list_head duplicate_save;
2043 struct amdgpu_sync sync_obj;
2044
2045 INIT_LIST_HEAD(&duplicate_save);
2046 INIT_LIST_HEAD(&ctx.list);
2047 INIT_LIST_HEAD(&ctx.duplicates);
2048
2049 pd_bo_list = kcalloc(process_info->n_vms,
2050 sizeof(struct amdgpu_bo_list_entry),
2051 GFP_KERNEL);
2052 if (!pd_bo_list)
2053 return -ENOMEM;
2054
2055 i = 0;
2056 mutex_lock(&process_info->lock);
2057 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2058 vm_list_node)
2059 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2060
2061 /* Reserve all BOs and page tables/directories. Add all BOs from
2062 * kfd_bo_list to ctx.list.
2063 */
2064 list_for_each_entry(mem, &process_info->kfd_bo_list,
2065 validate_list.head) {
2066
2067 list_add_tail(&mem->resv_list.head, &ctx.list);
2068 mem->resv_list.bo = mem->validate_list.bo;
2069 mem->resv_list.num_shared = mem->validate_list.num_shared;
2070 }
2071
2072 ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2073 false, &duplicate_save);
2074 if (ret) {
2075 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2076 goto ttm_reserve_fail;
2077 }
2078
2079 amdgpu_sync_create(&sync_obj);
2080
2081 /* Validate PDs and PTs */
2082 ret = process_validate_vms(process_info);
2083 if (ret)
2084 goto validate_map_fail;
2085
2086 ret = process_sync_pds_resv(process_info, &sync_obj);
2087 if (ret) {
2088 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2089 goto validate_map_fail;
2090 }
2091
2092 /* Validate BOs and map them to GPUVM (update VM page tables). */
2093 list_for_each_entry(mem, &process_info->kfd_bo_list,
2094 validate_list.head) {
2095
2096 struct amdgpu_bo *bo = mem->bo;
2097 uint32_t domain = mem->domain;
2098 struct kfd_bo_va_list *bo_va_entry;
2099
2100 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2101 if (ret) {
2102 pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2103 goto validate_map_fail;
2104 }
2105 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving, false);
2106 if (ret) {
2107 pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2108 goto validate_map_fail;
2109 }
2110 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2111 bo_list) {
2112 ret = update_gpuvm_pte((struct amdgpu_device *)
2113 bo_va_entry->kgd_dev,
2114 bo_va_entry,
2115 &sync_obj);
2116 if (ret) {
2117 pr_debug("Memory eviction: update PTE failed. Try again\n");
2118 goto validate_map_fail;
2119 }
2120 }
2121 }
2122
2123 /* Update page directories */
2124 ret = process_update_pds(process_info, &sync_obj);
2125 if (ret) {
2126 pr_debug("Memory eviction: update PDs failed. Try again\n");
2127 goto validate_map_fail;
2128 }
2129
2130 /* Wait for validate and PT updates to finish */
2131 amdgpu_sync_wait(&sync_obj, false);
2132
2133 /* Release the old eviction fence and create a new one: a fence only
2134 * goes from unsignaled to signaled and so cannot be reused.
2135 * Use the context and mm from the old fence.
2136 */
2137 new_fence = amdgpu_amdkfd_fence_create(
2138 process_info->eviction_fence->base.context,
2139 process_info->eviction_fence->mm);
2140 if (!new_fence) {
2141 pr_err("Failed to create eviction fence\n");
2142 ret = -ENOMEM;
2143 goto validate_map_fail;
2144 }
2145 dma_fence_put(&process_info->eviction_fence->base);
2146 process_info->eviction_fence = new_fence;
2147 *ef = dma_fence_get(&new_fence->base);
2148
2149 /* Attach new eviction fence to all BOs */
2150 list_for_each_entry(mem, &process_info->kfd_bo_list,
2151 validate_list.head)
2152 amdgpu_bo_fence(mem->bo,
2153 &process_info->eviction_fence->base, true);
2154
2155 /* Attach eviction fence to PD / PT BOs */
2156 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2157 vm_list_node) {
2158 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2159
2160 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2161 }
2162
2163 validate_map_fail:
2164 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2165 amdgpu_sync_free(&sync_obj);
2166 ttm_reserve_fail:
2167 mutex_unlock(&process_info->lock);
2168 kfree(pd_bo_list);
2169 return ret;
2170 }
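
/*
 * The fence-replacement step of the function above, in isolation
 * (simplified; "old" aliases process_info->eviction_fence): eviction
 * fences are single-shot, so restore creates a fresh one with the same
 * context/mm and hands the caller its own reference:
 *
 *	new = amdgpu_amdkfd_fence_create(old->base.context, old->mm);
 *	dma_fence_put(&old->base);
 *	process_info->eviction_fence = new;
 *	*ef = dma_fence_get(&new->base);
 */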
2171
2172 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2173 {
2174 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2175 struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2176 int ret;
2177
2178 if (!info || !gws)
2179 return -EINVAL;
2180
2181 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2182 if (!*mem)
2183 return -ENOMEM;
2184
2185 mutex_init(&(*mem)->lock);
2186 INIT_LIST_HEAD(&(*mem)->bo_va_list);
2187 (*mem)->bo = amdgpu_bo_ref(gws_bo);
2188 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2189 (*mem)->process_info = process_info;
2190 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2191 amdgpu_sync_create(&(*mem)->sync);
2192
2194 /* Validate the gws bo the first time it is added to the process */
2195 mutex_lock(&(*mem)->process_info->lock);
2196 ret = amdgpu_bo_reserve(gws_bo, false);
2197 if (unlikely(ret)) {
2198 pr_err("Reserve gws bo failed %d\n", ret);
2199 goto bo_reservation_failure;
2200 }
2201
2202 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2203 if (ret) {
2204 pr_err("GWS BO validate failed %d\n", ret);
2205 goto bo_validation_failure;
2206 }
2207 /* The GWS resource is shared between amdgpu and amdkfd.
2208 * Add the process eviction fence to the BO so they can
2209 * evict each other.
2210 */
2211 ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2212 if (ret)
2213 goto reserve_shared_fail;
2214 amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2215 amdgpu_bo_unreserve(gws_bo);
2216 mutex_unlock(&(*mem)->process_info->lock);
2217
2218 return ret;
2219
2220 reserve_shared_fail:
2221 bo_validation_failure:
2222 amdgpu_bo_unreserve(gws_bo);
2223 bo_reservation_failure:
2224 mutex_unlock(&(*mem)->process_info->lock);
2225 amdgpu_sync_free(&(*mem)->sync);
2226 remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2227 amdgpu_bo_unref(&gws_bo);
2228 mutex_destroy(&(*mem)->lock);
2229 kfree(*mem);
2230 *mem = NULL;
2231 return ret;
2232 }
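
/*
 * The error unwinding above is the usual kernel goto ladder, sketched
 * generically (step_a/step_b/cleanup_a are invented names): each label
 * releases exactly what was acquired since the previous label, in
 * reverse order:
 *
 *	ret = step_a();
 *	if (ret)
 *		return ret;
 *	ret = step_b();
 *	if (ret)
 *		goto undo_a;
 *	return 0;
 * undo_a:
 *	cleanup_a();
 *	return ret;
 */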
2233
2234 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2235 {
2236 int ret;
2237 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2238 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2239 struct amdgpu_bo *gws_bo = kgd_mem->bo;
2240
2241 /* Remove the BO from the process's validate list so the restore
2242 * worker won't touch it anymore
2243 */
2244 remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2245
2246 ret = amdgpu_bo_reserve(gws_bo, false);
2247 if (unlikely(ret)) {
2248 pr_err("Reserve gws bo failed %d\n", ret);
2249 /* TODO: add the BO back to the validate_list? */
2250 return ret;
2251 }
2252 amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2253 process_info->eviction_fence);
2254 amdgpu_bo_unreserve(gws_bo);
2255 amdgpu_sync_free(&kgd_mem->sync);
2256 amdgpu_bo_unref(&gws_bo);
2257 mutex_destroy(&kgd_mem->lock);
2258 kfree(mem);
2259 return 0;
2260 }
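
/*
 * Note (illustrative): fences on a BO's reservation object may only be
 * added or removed while the BO is reserved, hence the bracket above:
 *
 *	if (!amdgpu_bo_reserve(bo, false)) {
 *		amdgpu_amdkfd_remove_eviction_fence(bo, fence);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */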
2261
2262 /* Returns GPU-specific tiling mode information */
2263 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2264 struct tile_config *config)
2265 {
2266 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2267
2268 config->gb_addr_config = adev->gfx.config.gb_addr_config;
2269 config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2270 config->num_tile_configs =
2271 ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2272 config->macro_tile_config_ptr =
2273 adev->gfx.config.macrotile_mode_array;
2274 config->num_macro_tile_configs =
2275 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2276
2277 /* These values are not set from GFX9 onwards */
2278 config->num_banks = adev->gfx.config.num_banks;
2279 config->num_ranks = adev->gfx.config.num_ranks;
2280
2281 return 0;
2282 }
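
/*
 * Hypothetical caller sketch (use_config() is made up): the returned
 * pointers borrow directly from adev->gfx.config, so the caller copies
 * nothing and frees nothing:
 *
 *	struct tile_config cfg;
 *
 *	if (!amdgpu_amdkfd_get_tile_config(kgd, &cfg))
 *		use_config(cfg.gb_addr_config, cfg.num_tile_configs);
 */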