git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drm/amdgpu: add some VM PD/PT iterators v2
d38ceaf9
AD
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
f54d1867 28#include <linux/dma-fence-array.h>
a9f87f64 29#include <linux/interval_tree_generic.h>
02208441 30#include <linux/idr.h>
d38ceaf9
AD
31#include <drm/drmP.h>
32#include <drm/amdgpu_drm.h>
33#include "amdgpu.h"
34#include "amdgpu_trace.h"
ede0dd86 35#include "amdgpu_amdkfd.h"
c8c5e569 36#include "amdgpu_gmc.h"
d38ceaf9 37
7fc48e59
AG
38/**
39 * DOC: GPUVM
40 *
d38ceaf9
AD
41 * GPUVM is similar to the legacy gart on older asics, however
42 * rather than there being a single global gart table
43 * for the entire GPU, there are multiple VM page tables active
44 * at any given time. The VM page tables can contain a mix of
45 * vram pages and system memory pages, and the system memory pages
46 * can be mapped as snooped (cached system pages) or unsnooped
47 * (uncached system pages).
48 * Each VM has an ID associated with it and there is a page table
49 * associated with each VMID. When executing a command buffer,
50 * the kernel tells the ring what VMID to use for that command
51 * buffer. VMIDs are allocated dynamically as commands are submitted.
52 * The userspace drivers maintain their own address space and the kernel
53 * sets up their page tables accordingly when they submit their
54 * command buffers and a VMID is assigned.
55 * Cayman/Trinity support up to 8 active VMs at any given time;
56 * SI supports 16.
57 */
58
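/*
 * Editor's note (illustrative, not part of the original file): a typical
 * mapping in this file flows through amdgpu_vm_alloc_pts() to allocate the
 * PD/PT levels covering a range, amdgpu_vm_bo_update_mapping() to write the
 * PTEs, and amdgpu_vm_update_directories() to bring the page directories up
 * to date, while amdgpu_vm_flush() emits the VMID flush when a job runs.
 */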
a9f87f64
CK
59#define START(node) ((node)->start)
60#define LAST(node) ((node)->last)
61
62INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
63 START, LAST, static, amdgpu_vm_it)
64
65#undef START
66#undef LAST
67
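/*
 * Editor's illustrative sketch (not part of the original file):
 * INTERVAL_TREE_DEFINE() above generates amdgpu_vm_it_insert(),
 * amdgpu_vm_it_remove(), amdgpu_vm_it_iter_first() and
 * amdgpu_vm_it_iter_next(). A range lookup over the mappings of a VM is
 * assumed to look roughly like the hypothetical helper below.
 */
#if 0	/* example only */
static void amdgpu_vm_example_walk_mappings(struct amdgpu_vm *vm,
					    uint64_t start, uint64_t last)
{
	struct amdgpu_bo_va_mapping *mapping;

	/* visit every mapping overlapping the pfn range [start, last] */
	for (mapping = amdgpu_vm_it_iter_first(&vm->va, start, last); mapping;
	     mapping = amdgpu_vm_it_iter_next(mapping, start, last))
		;
}
#endif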
7fc48e59
AG
68/**
69 * struct amdgpu_pte_update_params - Local structure
70 *
71 * Encapsulate some VM table update parameters to reduce
f4833c4f 72 * the number of function parameters
7fc48e59 73 *
f4833c4f 74 */
29efc4f5 75struct amdgpu_pte_update_params {
7fc48e59
AG
76
77 /**
78 * @adev: amdgpu device we do this update for
79 */
27c5f36f 80 struct amdgpu_device *adev;
7fc48e59
AG
81
82 /**
83 * @vm: optional amdgpu_vm we do this update for
84 */
49ac8a24 85 struct amdgpu_vm *vm;
7fc48e59
AG
86
87 /**
88 * @src: address where to copy page table entries from
89 */
f4833c4f 90 uint64_t src;
7fc48e59
AG
91
92 /**
93 * @ib: indirect buffer to fill with commands
94 */
f4833c4f 95 struct amdgpu_ib *ib;
7fc48e59
AG
96
97 /**
98 * @func: Function which actually does the update
99 */
373ac645
CK
100 void (*func)(struct amdgpu_pte_update_params *params,
101 struct amdgpu_bo *bo, uint64_t pe,
afef8b8f 102 uint64_t addr, unsigned count, uint32_t incr,
6b777607 103 uint64_t flags);
7fc48e59
AG
104 /**
105 * @pages_addr:
106 *
107 * DMA addresses to use for mapping, used during VM update by CPU
b4d42511
HK
108 */
109 dma_addr_t *pages_addr;
7fc48e59
AG
110
111 /**
112 * @kptr:
113 *
114 * Kernel pointer of PD/PT BO that needs to be updated,
115 * used during VM update by CPU
116 */
b4d42511 117 void *kptr;
f4833c4f
HK
118};
119
7fc48e59
AG
120/**
121 * struct amdgpu_prt_cb - Helper to disable the partially resident texture (PRT) feature from a fence callback
122 */
284710fa 123struct amdgpu_prt_cb {
7fc48e59
AG
124
125 /**
126 * @adev: amdgpu device
127 */
284710fa 128 struct amdgpu_device *adev;
7fc48e59
AG
129
130 /**
131 * @cb: callback
132 */
284710fa
CK
133 struct dma_fence_cb cb;
134};
135
50783147
CK
136/**
137 * amdgpu_vm_level_shift - return the addr shift for each level
138 *
139 * @adev: amdgpu_device pointer
7fc48e59 140 * @level: VMPT level
50783147 141 *
7fc48e59
AG
142 * Returns:
143 * The number of bits the pfn needs to be right shifted for a level.
50783147
CK
144 */
145static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
146 unsigned level)
147{
196f7489
CZ
148 unsigned shift = 0xff;
149
150 switch (level) {
151 case AMDGPU_VM_PDB2:
152 case AMDGPU_VM_PDB1:
153 case AMDGPU_VM_PDB0:
154 shift = 9 * (AMDGPU_VM_PDB0 - level) +
50783147 155 adev->vm_manager.block_size;
196f7489
CZ
156 break;
157 case AMDGPU_VM_PTB:
158 shift = 0;
159 break;
160 default:
161 dev_err(adev->dev, "the level%d isn't supported.\n", level);
162 }
163
164 return shift;
50783147
CK
165}
166
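/*
 * Editor's note (worked example, assuming the common 9 bit block_size):
 * the shifts computed above are 9 * (AMDGPU_VM_PDB0 - level) + 9, i.e.
 * PDB2 -> 27, PDB1 -> 18, PDB0 -> 9 and PTB -> 0, so every directory level
 * covers 512 times the address range of the level below it.
 */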
d38ceaf9 167/**
72a7ec5c 168 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
d38ceaf9
AD
169 *
170 * @adev: amdgpu_device pointer
7fc48e59 171 * @level: VMPT level
d38ceaf9 172 *
7fc48e59
AG
173 * Returns:
174 * The number of entries in a page directory or page table.
d38ceaf9 175 */
72a7ec5c
CK
176static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
177 unsigned level)
d38ceaf9 178{
196f7489
CZ
179 unsigned shift = amdgpu_vm_level_shift(adev,
180 adev->vm_manager.root_level);
0410c5e5 181
196f7489 182 if (level == adev->vm_manager.root_level)
72a7ec5c 183 /* For the root directory */
0410c5e5 184 return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
196f7489 185 else if (level != AMDGPU_VM_PTB)
0410c5e5
CK
186 /* Everything in between */
187 return 512;
188 else
72a7ec5c 189 /* For the page tables on the leaves */
36b32a68 190 return AMDGPU_VM_PTE_COUNT(adev);
d38ceaf9
AD
191}
192
193/**
72a7ec5c 194 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
d38ceaf9
AD
195 *
196 * @adev: amdgpu_device pointer
7fc48e59 197 * @level: VMPT level
d38ceaf9 198 *
7fc48e59
AG
199 * Returns:
200 * The size of the BO for a page directory or page table in bytes.
d38ceaf9 201 */
72a7ec5c 202static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
d38ceaf9 203{
72a7ec5c 204 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
d38ceaf9
AD
205}
206
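/*
 * Editor's note (illustrative): every PDE/PTE is 8 bytes, so an interior
 * directory with 512 entries needs a 4KB BO, while the root and leaf sizes
 * follow from max_pfn and AMDGPU_VM_PTE_COUNT() respectively.
 */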
bcdc9fd6
CK
207/**
208 * amdgpu_vm_bo_evicted - vm_bo is evicted
209 *
210 * @vm_bo: vm_bo which is evicted
211 *
212 * State for PDs/PTs and per VM BOs which are not at the location they should
213 * be.
214 */
215static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
216{
217 struct amdgpu_vm *vm = vm_bo->vm;
218 struct amdgpu_bo *bo = vm_bo->bo;
219
220 vm_bo->moved = true;
221 if (bo->tbo.type == ttm_bo_type_kernel)
222 list_move(&vm_bo->vm_status, &vm->evicted);
223 else
224 list_move_tail(&vm_bo->vm_status, &vm->evicted);
225}
226
227/**
228 * amdgpu_vm_bo_relocated - vm_bo is relocated
229 *
230 * @vm_bo: vm_bo which is relocated
231 *
232 * State for PDs/PTs which need to update their parent PD.
233 */
234static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
235{
236 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
237}
238
239/**
240 * amdgpu_vm_bo_moved - vm_bo is moved
241 *
242 * @vm_bo: vm_bo which is moved
243 *
244 * State for per VM BOs which are moved, but that change is not yet reflected
245 * in the page tables.
246 */
247static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
248{
249 list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
250}
251
252/**
253 * amdgpu_vm_bo_idle - vm_bo is idle
254 *
255 * @vm_bo: vm_bo which is now idle
256 *
257 * State for PDs/PTs and per VM BOs which have gone through the state machine
258 * and are now idle.
259 */
260static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
261{
262 list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
263 vm_bo->moved = false;
264}
265
266/**
267 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
268 *
269 * @vm_bo: vm_bo which is now invalidated
270 *
271 * State for normal BOs which are invalidated and that change is not yet reflected
272 * in the PTs.
273 */
274static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
275{
276 spin_lock(&vm_bo->vm->invalidated_lock);
277 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
278 spin_unlock(&vm_bo->vm->invalidated_lock);
279}
280
281/**
282 * amdgpu_vm_bo_done - vm_bo is done
283 *
284 * @vm_bo: vm_bo which is now done
285 *
286 * State for normal BOs which are invalidated and that change has been updated
287 * in the PTs.
288 */
289static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
290{
291 spin_lock(&vm_bo->vm->invalidated_lock);
292 list_del_init(&vm_bo->vm_status);
293 spin_unlock(&vm_bo->vm->invalidated_lock);
294}
295
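/*
 * Editor's note (illustrative summary of the state machine above): evicted
 * BOs become relocated (PDs/PTs) or moved (per VM BOs) once they are
 * validated again, and end up idle when the page tables reflect their new
 * location; independently, normal BOs go from invalidated to done once
 * their mappings have been updated in the PTs.
 */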
c460f8a6
CK
296/**
297 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
298 *
299 * @base: base structure for tracking BO usage in a VM
300 * @vm: vm to which bo is to be added
301 * @bo: amdgpu buffer object
302 *
303 * Initialize an amdgpu_vm_bo_base structure and add it to the appropriate lists
304 *
305 */
306static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
307 struct amdgpu_vm *vm,
308 struct amdgpu_bo *bo)
309{
310 base->vm = vm;
311 base->bo = bo;
312 INIT_LIST_HEAD(&base->bo_list);
313 INIT_LIST_HEAD(&base->vm_status);
314
315 if (!bo)
316 return;
317 list_add_tail(&base->bo_list, &bo->va);
318
319 if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
320 return;
321
322 vm->bulk_moveable = false;
323 if (bo->tbo.type == ttm_bo_type_kernel)
bcdc9fd6 324 amdgpu_vm_bo_relocated(base);
c460f8a6 325 else
bcdc9fd6 326 amdgpu_vm_bo_idle(base);
c460f8a6
CK
327
328 if (bo->preferred_domains &
329 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
330 return;
331
332 /*
333 * we checked all the prerequisites, but it looks like this per vm bo
334 * is currently evicted. add the bo to the evicted list to make sure it
335 * is validated on the next vm use to avoid faults.
336 */
bcdc9fd6 337 amdgpu_vm_bo_evicted(base);
c460f8a6
CK
338}
339
ba79fde4
CK
340/**
341 * amdgpu_vm_pt_parent - get the parent page directory
342 *
343 * @pt: child page table
344 *
345 * Helper to get the parent entry for the child page table. NULL if we are at
346 * the root page directory.
347 */
348static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
349{
350 struct amdgpu_bo *parent = pt->base.bo->parent;
351
352 if (!parent)
353 return NULL;
354
355 return list_first_entry(&parent->va, struct amdgpu_vm_pt, base.bo_list);
356}
357
73633e32
CK
358/**
359 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
360 */
361struct amdgpu_vm_pt_cursor {
362 uint64_t pfn;
363 struct amdgpu_vm_pt *parent;
364 struct amdgpu_vm_pt *entry;
365 unsigned level;
366};
367
368/**
369 * amdgpu_vm_pt_start - start PD/PT walk
370 *
371 * @adev: amdgpu_device pointer
372 * @vm: amdgpu_vm structure
373 * @start: start address of the walk
374 * @cursor: state to initialize
375 *
376 * Initialize an amdgpu_vm_pt_cursor to start a walk.
377 */
378static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
379 struct amdgpu_vm *vm, uint64_t start,
380 struct amdgpu_vm_pt_cursor *cursor)
381{
382 cursor->pfn = start;
383 cursor->parent = NULL;
384 cursor->entry = &vm->root;
385 cursor->level = adev->vm_manager.root_level;
386}
387
388/**
389 * amdgpu_vm_pt_descendant - go to child node
390 *
391 * @adev: amdgpu_device pointer
392 * @cursor: current state
393 *
394 * Walk to the child node of the current node.
395 * Returns:
396 * True if the walk was possible, false otherwise.
397 */
398static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
399 struct amdgpu_vm_pt_cursor *cursor)
400{
401 unsigned num_entries, shift, idx;
402
403 if (!cursor->entry->entries)
404 return false;
405
406 BUG_ON(!cursor->entry->base.bo);
407 num_entries = amdgpu_vm_num_entries(adev, cursor->level);
408 shift = amdgpu_vm_level_shift(adev, cursor->level);
409
410 ++cursor->level;
411 idx = (cursor->pfn >> shift) % num_entries;
412 cursor->parent = cursor->entry;
413 cursor->entry = &cursor->entry->entries[idx];
414 return true;
415}
416
417/**
418 * amdgpu_vm_pt_sibling - go to sibling node
419 *
420 * @adev: amdgpu_device pointer
421 * @cursor: current state
422 *
423 * Walk to the sibling node of the current node.
424 * Returns:
425 * True if the walk was possible, false otherwise.
426 */
427static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
428 struct amdgpu_vm_pt_cursor *cursor)
429{
430 unsigned shift, num_entries;
431
432 /* Root doesn't have a sibling */
433 if (!cursor->parent)
434 return false;
435
436 /* Go to our parents and see if we got a sibling */
437 shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
438 num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
439
440 if (cursor->entry == &cursor->parent->entries[num_entries - 1])
441 return false;
442
443 cursor->pfn += 1ULL << shift;
444 cursor->pfn &= ~((1ULL << shift) - 1);
445 ++cursor->entry;
446 return true;
447}
448
449/**
450 * amdgpu_vm_pt_ancestor - go to parent node
451 *
452 * @cursor: current state
453 *
454 * Walk to the parent node of the current node.
455 * Returns:
456 * True if the walk was possible, false otherwise.
457 */
458static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
459{
460 if (!cursor->parent)
461 return false;
462
463 --cursor->level;
464 cursor->entry = cursor->parent;
465 cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
466 return true;
467}
468
469/**
470 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
471 *
472 * @adev: amdgpu_device pointer
473 * @cursor: current state
474 *
475 * Walk the PD/PT tree to the next node.
476 */
477static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
478 struct amdgpu_vm_pt_cursor *cursor)
479{
480 /* First try a newborn child */
481 if (amdgpu_vm_pt_descendant(adev, cursor))
482 return;
483
484 /* If that didn't work, try to find a sibling */
485 while (!amdgpu_vm_pt_sibling(adev, cursor)) {
486 /* No sibling, go to our parents and grandparents */
487 if (!amdgpu_vm_pt_ancestor(cursor)) {
488 cursor->pfn = ~0ll;
489 return;
490 }
491 }
492}
493
494/**
495 * amdgpu_vm_pt_first_leaf - get first leaf PD/PT
496 *
497 * @adev: amdgpu_device pointer
498 * @vm: amdgpu_vm structure
499 * @start: start addr of the walk
500 * @cursor: state to initialize
501 *
502 * Start a walk and go directly to the leaf node.
503 */
504static void amdgpu_vm_pt_first_leaf(struct amdgpu_device *adev,
505 struct amdgpu_vm *vm, uint64_t start,
506 struct amdgpu_vm_pt_cursor *cursor)
507{
508 amdgpu_vm_pt_start(adev, vm, start, cursor);
509 while (amdgpu_vm_pt_descendant(adev, cursor));
510}
511
512/**
513 * amdgpu_vm_pt_next_leaf - get next leaf PD/PT
514 *
515 * @adev: amdgpu_device pointer
516 * @cursor: current state
517 *
518 * Walk the PD/PT tree to the next leaf node.
519 */
520static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
521 struct amdgpu_vm_pt_cursor *cursor)
522{
523 amdgpu_vm_pt_next(adev, cursor);
524 while (amdgpu_vm_pt_descendant(adev, cursor));
525}
526
527/**
528 * for_each_amdgpu_vm_pt_leaf - walk over all leaf PDs/PTs in the hierarchy
529 */
530#define for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor) \
531 for (amdgpu_vm_pt_first_leaf((adev), (vm), (start), &(cursor)); \
532 (cursor).pfn <= end; amdgpu_vm_pt_next_leaf((adev), &(cursor)))
533
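/*
 * Editor's illustrative sketch (not part of the original file): a caller of
 * the new leaf iterator above could look like the hypothetical function
 * below; cursor.entry is the leaf covering cursor.pfn on every iteration.
 */
#if 0	/* example only */
static void amdgpu_vm_example_walk_leaves(struct amdgpu_device *adev,
					  struct amdgpu_vm *vm,
					  uint64_t start, uint64_t end)
{
	struct amdgpu_vm_pt_cursor cursor;

	for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor) {
		/* leaf page tables may not be allocated yet */
		if (!cursor.entry->base.bo)
			continue;
	}
}
#endif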
534/**
535 * amdgpu_vm_pt_first_dfs - start a depth first search
536 *
537 * @adev: amdgpu_device structure
538 * @vm: amdgpu_vm structure
539 * @cursor: state to initialize
540 *
541 * Starts a depth first traversal of the PD/PT tree.
542 */
543static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
544 struct amdgpu_vm *vm,
545 struct amdgpu_vm_pt_cursor *cursor)
546{
547 amdgpu_vm_pt_start(adev, vm, 0, cursor);
548 while (amdgpu_vm_pt_descendant(adev, cursor));
549}
550
551/**
552 * amdgpu_vm_pt_next_dfs - get the next node for a depth first search
553 *
554 * @adev: amdgpu_device structure
555 * @cursor: current state
556 *
557 * Move the cursor to the next node in a depth first search.
558 */
559static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
560 struct amdgpu_vm_pt_cursor *cursor)
561{
562 if (!cursor->entry)
563 return;
564
565 if (!cursor->parent)
566 cursor->entry = NULL;
567 else if (amdgpu_vm_pt_sibling(adev, cursor))
568 while (amdgpu_vm_pt_descendant(adev, cursor));
569 else
570 amdgpu_vm_pt_ancestor(cursor);
571}
572
573/**
574 * for_each_amdgpu_vm_pt_dfs_safe - safe depth first search of all PDs/PTs
575 */
576#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) \
577 for (amdgpu_vm_pt_first_dfs((adev), (vm), &(cursor)), \
578 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
579 (entry); (entry) = (cursor).entry, \
580 amdgpu_vm_pt_next_dfs((adev), &(cursor)))
581
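/*
 * Editor's illustrative sketch (not part of the original file): the DFS
 * variant visits children before their parents, so @entry may be torn down
 * while walking; a hypothetical user could look like this.
 */
#if 0	/* example only */
static void amdgpu_vm_example_walk_dfs(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_pt *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {
		/* all children of entry have already been visited here */
	}
}
#endif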
d38ceaf9 582/**
56467ebf 583 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
d38ceaf9
AD
584 *
585 * @vm: vm providing the BOs
3c0eea6c 586 * @validated: head of validation list
56467ebf 587 * @entry: entry to add
d38ceaf9
AD
588 *
589 * Add the page directory to the list of BOs to
56467ebf 590 * validate for command submission.
d38ceaf9 591 */
56467ebf
CK
592void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
593 struct list_head *validated,
594 struct amdgpu_bo_list_entry *entry)
d38ceaf9 595{
3f3333f8 596 entry->robj = vm->root.base.bo;
56467ebf 597 entry->priority = 0;
67003a15 598 entry->tv.bo = &entry->robj->tbo;
56467ebf 599 entry->tv.shared = true;
2f568dbd 600 entry->user_pages = NULL;
56467ebf
CK
601 list_add(&entry->tv.head, validated);
602}
d38ceaf9 603
f921661b
HR
604/**
605 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
606 *
607 * @adev: amdgpu device pointer
608 * @vm: vm providing the BOs
609 *
610 * Move all BOs to the end of LRU and remember their positions to put them
611 * together.
612 */
613void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
614 struct amdgpu_vm *vm)
615{
616 struct ttm_bo_global *glob = adev->mman.bdev.glob;
617 struct amdgpu_vm_bo_base *bo_base;
618
619 if (vm->bulk_moveable) {
620 spin_lock(&glob->lru_lock);
621 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
622 spin_unlock(&glob->lru_lock);
623 return;
624 }
625
626 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
627
628 spin_lock(&glob->lru_lock);
629 list_for_each_entry(bo_base, &vm->idle, vm_status) {
630 struct amdgpu_bo *bo = bo_base->bo;
631
632 if (!bo->parent)
633 continue;
634
635 ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
636 if (bo->shadow)
637 ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
638 &vm->lru_bulk_move);
639 }
640 spin_unlock(&glob->lru_lock);
641
642 vm->bulk_moveable = true;
643}
644
670fecc8 645/**
f7da30d9 646 * amdgpu_vm_validate_pt_bos - validate the page table BOs
670fecc8 647 *
5a712a87 648 * @adev: amdgpu device pointer
56467ebf 649 * @vm: vm providing the BOs
670fecc8
CK
650 * @validate: callback to do the validation
651 * @param: parameter for the validation callback
652 *
653 * Validate the page table BOs on command submission if necessary.
7fc48e59
AG
654 *
655 * Returns:
656 * Validation result.
670fecc8 657 */
f7da30d9
CK
658int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
659 int (*validate)(void *p, struct amdgpu_bo *bo),
660 void *param)
670fecc8 661{
91ccdd24
CK
662 struct amdgpu_vm_bo_base *bo_base, *tmp;
663 int r = 0;
670fecc8 664
f921661b
HR
665 vm->bulk_moveable &= list_empty(&vm->evicted);
666
91ccdd24
CK
667 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
668 struct amdgpu_bo *bo = bo_base->bo;
670fecc8 669
262b9c39
CK
670 r = validate(param, bo);
671 if (r)
672 break;
670fecc8 673
af4c0f65 674 if (bo->tbo.type != ttm_bo_type_kernel) {
bcdc9fd6 675 amdgpu_vm_bo_moved(bo_base);
af4c0f65 676 } else {
17cc5252
CK
677 if (vm->use_cpu_for_update)
678 r = amdgpu_bo_kmap(bo, NULL);
679 else
680 r = amdgpu_ttm_alloc_gart(&bo->tbo);
284dec43
CK
681 if (r)
682 break;
3d5fe658
CK
683 if (bo->shadow) {
684 r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
685 if (r)
686 break;
687 }
bcdc9fd6 688 amdgpu_vm_bo_relocated(bo_base);
af4c0f65 689 }
670fecc8
CK
690 }
691
91ccdd24 692 return r;
670fecc8
CK
693}
694
56467ebf 695/**
34d7be5d 696 * amdgpu_vm_ready - check VM is ready for updates
56467ebf 697 *
34d7be5d 698 * @vm: VM to check
d38ceaf9 699 *
34d7be5d 700 * Check if all VM PDs/PTs are ready for updates
7fc48e59
AG
701 *
702 * Returns:
703 * True if eviction list is empty.
d38ceaf9 704 */
3f3333f8 705bool amdgpu_vm_ready(struct amdgpu_vm *vm)
d38ceaf9 706{
af4c0f65 707 return list_empty(&vm->evicted);
d711e139
CK
708}
709
13307f7e
CK
710/**
711 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
712 *
713 * @adev: amdgpu_device pointer
7fc48e59 714 * @vm: VM to clear BO from
13307f7e
CK
715 * @bo: BO to clear
716 * @level: level this BO is at
00553cf8 717 * @pte_support_ats: indicate ATS support from PTE
13307f7e
CK
718 *
719 * Root PD needs to be reserved when calling this.
7fc48e59
AG
720 *
721 * Returns:
722 * 0 on success, errno otherwise.
13307f7e
CK
723 */
724static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
4584312d
CK
725 struct amdgpu_vm *vm, struct amdgpu_bo *bo,
726 unsigned level, bool pte_support_ats)
13307f7e
CK
727{
728 struct ttm_operation_ctx ctx = { true, false };
729 struct dma_fence *fence = NULL;
4584312d 730 unsigned entries, ats_entries;
13307f7e
CK
731 struct amdgpu_ring *ring;
732 struct amdgpu_job *job;
4584312d 733 uint64_t addr;
13307f7e
CK
734 int r;
735
4584312d
CK
736 entries = amdgpu_bo_size(bo) / 8;
737
738 if (pte_support_ats) {
739 if (level == adev->vm_manager.root_level) {
740 ats_entries = amdgpu_vm_level_shift(adev, level);
741 ats_entries += AMDGPU_GPU_PAGE_SHIFT;
ad9a5b78 742 ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
4584312d
CK
743 ats_entries = min(ats_entries, entries);
744 entries -= ats_entries;
745 } else {
746 ats_entries = entries;
747 entries = 0;
748 }
13307f7e 749 } else {
4584312d 750 ats_entries = 0;
13307f7e
CK
751 }
752
068c3304 753 ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
13307f7e
CK
754
755 r = reservation_object_reserve_shared(bo->tbo.resv);
756 if (r)
757 return r;
758
759 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
760 if (r)
761 goto error;
762
284dec43
CK
763 r = amdgpu_ttm_alloc_gart(&bo->tbo);
764 if (r)
765 return r;
766
13307f7e
CK
767 r = amdgpu_job_alloc_with_ib(adev, 64, &job);
768 if (r)
769 goto error;
770
1cadf2b3 771 addr = amdgpu_bo_gpu_offset(bo);
4584312d
CK
772 if (ats_entries) {
773 uint64_t ats_value;
774
775 ats_value = AMDGPU_PTE_DEFAULT_ATC;
776 if (level != AMDGPU_VM_PTB)
777 ats_value |= AMDGPU_PDE_PTE;
778
779 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
780 ats_entries, 0, ats_value);
781 addr += ats_entries * 8;
782 }
783
784 if (entries)
785 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
786 entries, 0, 0);
787
13307f7e
CK
788 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
789
790 WARN_ON(job->ibs[0].length_dw > 64);
29e8357b
CK
791 r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
792 AMDGPU_FENCE_OWNER_UNDEFINED, false);
793 if (r)
794 goto error_free;
795
0e28b10f
CK
796 r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
797 &fence);
13307f7e
CK
798 if (r)
799 goto error_free;
800
801 amdgpu_bo_fence(bo, fence, true);
802 dma_fence_put(fence);
e61736da
CK
803
804 if (bo->shadow)
805 return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
806 level, pte_support_ats);
807
13307f7e
CK
808 return 0;
809
810error_free:
811 amdgpu_job_free(job);
812
813error:
814 return r;
815}
816
e21eb261
CK
817/**
818 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
819 *
820 * @adev: amdgpu_device pointer
821 * @vm: requesting vm
822 * @bp: resulting BO allocation parameters
823 */
824static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
825 int level, struct amdgpu_bo_param *bp)
826{
827 memset(bp, 0, sizeof(*bp));
828
829 bp->size = amdgpu_vm_bo_size(adev, level);
830 bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
831 bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
284dec43
CK
832 if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
833 adev->flags & AMD_IS_APU)
834 bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
835 bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
836 bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
837 AMDGPU_GEM_CREATE_CPU_GTT_USWC;
e21eb261
CK
838 if (vm->use_cpu_for_update)
839 bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
03e9dee1
FK
840 else if (!vm->root.base.bo || vm->root.base.bo->shadow)
841 bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
e21eb261
CK
842 bp->type = ttm_bo_type_kernel;
843 if (vm->root.base.bo)
844 bp->resv = vm->root.base.bo->tbo.resv;
845}
846
d711e139 847/**
f566ceb1
CK
848 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
849 *
850 * @adev: amdgpu_device pointer
851 * @vm: requested vm
7fc48e59 852 * @parent: parent PT
f566ceb1
CK
853 * @saddr: start of the address range
854 * @eaddr: end of the address range
7fc48e59
AG
855 * @level: VMPT level
856 * @ats: indicate ATS support from PTE
f566ceb1
CK
857 *
858 * Make sure the page directories and page tables are allocated
7fc48e59
AG
859 *
860 * Returns:
861 * 0 on success, errno otherwise.
f566ceb1
CK
862 */
863static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
864 struct amdgpu_vm *vm,
865 struct amdgpu_vm_pt *parent,
866 uint64_t saddr, uint64_t eaddr,
4584312d 867 unsigned level, bool ats)
f566ceb1 868{
50783147 869 unsigned shift = amdgpu_vm_level_shift(adev, level);
e21eb261 870 struct amdgpu_bo_param bp;
f566ceb1 871 unsigned pt_idx, from, to;
13307f7e 872 int r;
f566ceb1
CK
873
874 if (!parent->entries) {
875 unsigned num_entries = amdgpu_vm_num_entries(adev, level);
876
2098105e
MH
877 parent->entries = kvmalloc_array(num_entries,
878 sizeof(struct amdgpu_vm_pt),
879 GFP_KERNEL | __GFP_ZERO);
f566ceb1
CK
880 if (!parent->entries)
881 return -ENOMEM;
f566ceb1
CK
882 }
883
1866bac8
FK
884 from = saddr >> shift;
885 to = eaddr >> shift;
886 if (from >= amdgpu_vm_num_entries(adev, level) ||
887 to >= amdgpu_vm_num_entries(adev, level))
888 return -EINVAL;
f566ceb1 889
f566ceb1 890 ++level;
1866bac8
FK
891 saddr = saddr & ((1 << shift) - 1);
892 eaddr = eaddr & ((1 << shift) - 1);
f566ceb1 893
e21eb261 894 amdgpu_vm_bo_param(adev, vm, level, &bp);
3c824172 895
f566ceb1
CK
896 /* walk over the address space and allocate the page tables */
897 for (pt_idx = from; pt_idx <= to; ++pt_idx) {
f566ceb1
CK
898 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
899 struct amdgpu_bo *pt;
900
3f3333f8 901 if (!entry->base.bo) {
3216c6b7 902 r = amdgpu_bo_create(adev, &bp, &pt);
f566ceb1
CK
903 if (r)
904 return r;
905
4584312d 906 r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
13307f7e 907 if (r) {
e5197a4c 908 amdgpu_bo_unref(&pt->shadow);
13307f7e
CK
909 amdgpu_bo_unref(&pt);
910 return r;
911 }
912
0a096fb6
CK
913 if (vm->use_cpu_for_update) {
914 r = amdgpu_bo_kmap(pt, NULL);
915 if (r) {
e5197a4c 916 amdgpu_bo_unref(&pt->shadow);
0a096fb6
CK
917 amdgpu_bo_unref(&pt);
918 return r;
919 }
920 }
921
f566ceb1
CK
922 /* Keep a reference to the root directory to avoid
923 * freeing them up in the wrong order.
924 */
0f2fc435 925 pt->parent = amdgpu_bo_ref(parent->base.bo);
f566ceb1 926
3f4299be 927 amdgpu_vm_bo_base_init(&entry->base, vm, pt);
f566ceb1
CK
928 }
929
196f7489 930 if (level < AMDGPU_VM_PTB) {
1866bac8
FK
931 uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
932 uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
933 ((1 << shift) - 1);
934 r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
4584312d 935 sub_eaddr, level, ats);
f566ceb1
CK
936 if (r)
937 return r;
938 }
939 }
940
941 return 0;
942}
943
663e4577
CK
944/**
945 * amdgpu_vm_alloc_pts - Allocate page tables.
946 *
947 * @adev: amdgpu_device pointer
948 * @vm: VM to allocate page tables for
949 * @saddr: Start address which needs to be allocated
950 * @size: Size from start address we need.
951 *
952 * Make sure the page tables are allocated.
7fc48e59
AG
953 *
954 * Returns:
955 * 0 on success, errno otherwise.
663e4577
CK
956 */
957int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
958 struct amdgpu_vm *vm,
959 uint64_t saddr, uint64_t size)
960{
663e4577 961 uint64_t eaddr;
4584312d 962 bool ats = false;
663e4577
CK
963
964 /* validate the parameters */
965 if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
966 return -EINVAL;
967
968 eaddr = saddr + size - 1;
4584312d
CK
969
970 if (vm->pte_support_ats)
ad9a5b78 971 ats = saddr < AMDGPU_GMC_HOLE_START;
663e4577
CK
972
973 saddr /= AMDGPU_GPU_PAGE_SIZE;
974 eaddr /= AMDGPU_GPU_PAGE_SIZE;
975
4584312d
CK
976 if (eaddr >= adev->vm_manager.max_pfn) {
977 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
978 eaddr, adev->vm_manager.max_pfn);
979 return -EINVAL;
980 }
981
196f7489 982 return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
4584312d 983 adev->vm_manager.root_level, ats);
663e4577
CK
984}
985
e59c0205
AX
986/**
987 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
988 *
989 * @adev: amdgpu_device pointer
990 */
991void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
93dcc37d 992{
a1255107 993 const struct amdgpu_ip_block *ip_block;
e59c0205
AX
994 bool has_compute_vm_bug;
995 struct amdgpu_ring *ring;
996 int i;
93dcc37d 997
e59c0205 998 has_compute_vm_bug = false;
93dcc37d 999
2990a1fc 1000 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
e59c0205
AX
1001 if (ip_block) {
1002 /* Compute has a VM bug for GFX version < 7.
1003 * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
1004 if (ip_block->version->major <= 7)
1005 has_compute_vm_bug = true;
1006 else if (ip_block->version->major == 8)
1007 if (adev->gfx.mec_fw_version < 673)
1008 has_compute_vm_bug = true;
1009 }
93dcc37d 1010
e59c0205
AX
1011 for (i = 0; i < adev->num_rings; i++) {
1012 ring = adev->rings[i];
1013 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
1014 /* only compute rings */
1015 ring->has_compute_vm_bug = has_compute_vm_bug;
93dcc37d 1016 else
e59c0205 1017 ring->has_compute_vm_bug = false;
93dcc37d 1018 }
93dcc37d
AD
1019}
1020
7fc48e59
AG
1021/**
1022 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
1023 *
1024 * @ring: ring on which the job will be submitted
1025 * @job: job to submit
1026 *
1027 * Returns:
1028 * True if sync is needed.
1029 */
b9bf33d5
CZ
1030bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
1031 struct amdgpu_job *job)
e60f8db5 1032{
b9bf33d5
CZ
1033 struct amdgpu_device *adev = ring->adev;
1034 unsigned vmhub = ring->funcs->vmhub;
620f774f
CK
1035 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1036 struct amdgpu_vmid *id;
b9bf33d5 1037 bool gds_switch_needed;
e59c0205 1038 bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
b9bf33d5 1039
c4f46f22 1040 if (job->vmid == 0)
b9bf33d5 1041 return false;
c4f46f22 1042 id = &id_mgr->ids[job->vmid];
b9bf33d5
CZ
1043 gds_switch_needed = ring->funcs->emit_gds_switch && (
1044 id->gds_base != job->gds_base ||
1045 id->gds_size != job->gds_size ||
1046 id->gws_base != job->gws_base ||
1047 id->gws_size != job->gws_size ||
1048 id->oa_base != job->oa_base ||
1049 id->oa_size != job->oa_size);
e60f8db5 1050
620f774f 1051 if (amdgpu_vmid_had_gpu_reset(adev, id))
b9bf33d5 1052 return true;
e60f8db5 1053
bb37b67d 1054 return vm_flush_needed || gds_switch_needed;
b9bf33d5
CZ
1055}
1056
d38ceaf9
AD
1057/**
1058 * amdgpu_vm_flush - hardware flush the vm
1059 *
1060 * @ring: ring to use for flush
00553cf8 1061 * @job: related job
7fc48e59 1062 * @need_pipe_sync: is pipe sync needed
d38ceaf9 1063 *
4ff37a83 1064 * Emit a VM flush when it is necessary.
7fc48e59
AG
1065 *
1066 * Returns:
1067 * 0 on success, errno otherwise.
d38ceaf9 1068 */
8fdf074f 1069int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
d38ceaf9 1070{
971fe9a9 1071 struct amdgpu_device *adev = ring->adev;
7645670d 1072 unsigned vmhub = ring->funcs->vmhub;
620f774f 1073 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
c4f46f22 1074 struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
d564a06e 1075 bool gds_switch_needed = ring->funcs->emit_gds_switch && (
fd53be30
CZ
1076 id->gds_base != job->gds_base ||
1077 id->gds_size != job->gds_size ||
1078 id->gws_base != job->gws_base ||
1079 id->gws_size != job->gws_size ||
1080 id->oa_base != job->oa_base ||
1081 id->oa_size != job->oa_size);
de37e68a 1082 bool vm_flush_needed = job->vm_needs_flush;
b3cd285f
CK
1083 bool pasid_mapping_needed = id->pasid != job->pasid ||
1084 !id->pasid_mapping ||
1085 !dma_fence_is_signaled(id->pasid_mapping);
1086 struct dma_fence *fence = NULL;
c0e51931 1087 unsigned patch_offset = 0;
41d9eb2c 1088 int r;
d564a06e 1089
620f774f 1090 if (amdgpu_vmid_had_gpu_reset(adev, id)) {
f7d015b9
CK
1091 gds_switch_needed = true;
1092 vm_flush_needed = true;
b3cd285f 1093 pasid_mapping_needed = true;
f7d015b9 1094 }
971fe9a9 1095
b3cd285f 1096 gds_switch_needed &= !!ring->funcs->emit_gds_switch;
d8de8260
AG
1097 vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
1098 job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
b3cd285f
CK
1099 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
1100 ring->funcs->emit_wreg;
1101
8fdf074f 1102 if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
f7d015b9 1103 return 0;
41d9eb2c 1104
c0e51931
CK
1105 if (ring->funcs->init_cond_exec)
1106 patch_offset = amdgpu_ring_init_cond_exec(ring);
41d9eb2c 1107
8fdf074f
ML
1108 if (need_pipe_sync)
1109 amdgpu_ring_emit_pipeline_sync(ring);
1110
b3cd285f 1111 if (vm_flush_needed) {
c4f46f22 1112 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
c633c00b 1113 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
b3cd285f
CK
1114 }
1115
1116 if (pasid_mapping_needed)
1117 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
e9d672b2 1118
b3cd285f 1119 if (vm_flush_needed || pasid_mapping_needed) {
d240cd9e 1120 r = amdgpu_fence_emit(ring, &fence, 0);
c0e51931
CK
1121 if (r)
1122 return r;
b3cd285f 1123 }
e9d672b2 1124
b3cd285f 1125 if (vm_flush_needed) {
7645670d 1126 mutex_lock(&id_mgr->lock);
c0e51931 1127 dma_fence_put(id->last_flush);
b3cd285f
CK
1128 id->last_flush = dma_fence_get(fence);
1129 id->current_gpu_reset_count =
1130 atomic_read(&adev->gpu_reset_counter);
7645670d 1131 mutex_unlock(&id_mgr->lock);
c0e51931 1132 }
e9d672b2 1133
b3cd285f
CK
1134 if (pasid_mapping_needed) {
1135 id->pasid = job->pasid;
1136 dma_fence_put(id->pasid_mapping);
1137 id->pasid_mapping = dma_fence_get(fence);
1138 }
1139 dma_fence_put(fence);
1140
7c4378f4 1141 if (ring->funcs->emit_gds_switch && gds_switch_needed) {
c0e51931
CK
1142 id->gds_base = job->gds_base;
1143 id->gds_size = job->gds_size;
1144 id->gws_base = job->gws_base;
1145 id->gws_size = job->gws_size;
1146 id->oa_base = job->oa_base;
1147 id->oa_size = job->oa_size;
c4f46f22 1148 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
c0e51931
CK
1149 job->gds_size, job->gws_base,
1150 job->gws_size, job->oa_base,
1151 job->oa_size);
1152 }
1153
1154 if (ring->funcs->patch_cond_exec)
1155 amdgpu_ring_patch_cond_exec(ring, patch_offset);
1156
1157 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
1158 if (ring->funcs->emit_switch_buffer) {
1159 amdgpu_ring_emit_switch_buffer(ring);
1160 amdgpu_ring_emit_switch_buffer(ring);
e9d672b2 1161 }
41d9eb2c 1162 return 0;
971fe9a9
CK
1163}
1164
d38ceaf9
AD
1165/**
1166 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
1167 *
1168 * @vm: requested vm
1169 * @bo: requested buffer object
1170 *
8843dbbb 1171 * Find @bo inside the requested vm.
d38ceaf9
AD
1172 * Search inside the @bo's vm list for the requested vm.
1173 * Returns the found bo_va or NULL if none is found.
1174 *
1175 * Object has to be reserved!
7fc48e59
AG
1176 *
1177 * Returns:
1178 * Found bo_va or NULL.
d38ceaf9
AD
1179 */
1180struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
1181 struct amdgpu_bo *bo)
1182{
1183 struct amdgpu_bo_va *bo_va;
1184
ec681545
CK
1185 list_for_each_entry(bo_va, &bo->va, base.bo_list) {
1186 if (bo_va->base.vm == vm) {
d38ceaf9
AD
1187 return bo_va;
1188 }
1189 }
1190 return NULL;
1191}
1192
1193/**
afef8b8f 1194 * amdgpu_vm_do_set_ptes - helper to call the right asic function
d38ceaf9 1195 *
29efc4f5 1196 * @params: see amdgpu_pte_update_params definition
373ac645 1197 * @bo: PD/PT to update
d38ceaf9
AD
1198 * @pe: addr of the page entry
1199 * @addr: dst addr to write into pe
1200 * @count: number of page entries to update
1201 * @incr: increase next addr by incr bytes
1202 * @flags: hw access flags
d38ceaf9
AD
1203 *
1204 * Traces the parameters and calls the right asic functions
1205 * to setup the page table using the DMA.
1206 */
afef8b8f 1207static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
373ac645 1208 struct amdgpu_bo *bo,
afef8b8f
CK
1209 uint64_t pe, uint64_t addr,
1210 unsigned count, uint32_t incr,
6b777607 1211 uint64_t flags)
d38ceaf9 1212{
373ac645 1213 pe += amdgpu_bo_gpu_offset(bo);
ec2f05f0 1214 trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
d38ceaf9 1215
afef8b8f 1216 if (count < 3) {
de9ea7bd
CK
1217 amdgpu_vm_write_pte(params->adev, params->ib, pe,
1218 addr | flags, count, incr);
d38ceaf9
AD
1219
1220 } else {
27c5f36f 1221 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
d38ceaf9
AD
1222 count, incr, flags);
1223 }
1224}
1225
afef8b8f
CK
1226/**
1227 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
1228 *
1229 * @params: see amdgpu_pte_update_params definition
373ac645 1230 * @bo: PD/PT to update
afef8b8f
CK
1231 * @pe: addr of the page entry
1232 * @addr: dst addr to write into pe
1233 * @count: number of page entries to update
1234 * @incr: increase next addr by incr bytes
1235 * @flags: hw access flags
1236 *
1237 * Traces the parameters and calls the DMA function to copy the PTEs.
1238 */
1239static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
373ac645 1240 struct amdgpu_bo *bo,
afef8b8f
CK
1241 uint64_t pe, uint64_t addr,
1242 unsigned count, uint32_t incr,
6b777607 1243 uint64_t flags)
afef8b8f 1244{
ec2f05f0 1245 uint64_t src = (params->src + (addr >> 12) * 8);
afef8b8f 1246
373ac645 1247 pe += amdgpu_bo_gpu_offset(bo);
ec2f05f0
CK
1248 trace_amdgpu_vm_copy_ptes(pe, src, count);
1249
1250 amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
afef8b8f
CK
1251}
1252
d38ceaf9 1253/**
b07c9d2a 1254 * amdgpu_vm_map_gart - Resolve gart mapping of addr
d38ceaf9 1255 *
b07c9d2a 1256 * @pages_addr: optional DMA address to use for lookup
d38ceaf9
AD
1257 * @addr: the unmapped addr
1258 *
1259 * Look up the physical address of the page that the pte resolves
7fc48e59
AG
1260 * to.
1261 *
1262 * Returns:
1263 * The pointer for the page table entry.
d38ceaf9 1264 */
de9ea7bd 1265static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
d38ceaf9
AD
1266{
1267 uint64_t result;
1268
de9ea7bd
CK
1269 /* page table offset */
1270 result = pages_addr[addr >> PAGE_SHIFT];
b07c9d2a 1271
de9ea7bd
CK
1272 /* in case cpu page size != gpu page size*/
1273 result |= addr & (~PAGE_MASK);
d38ceaf9 1274
b07c9d2a 1275 result &= 0xFFFFFFFFFFFFF000ULL;
d38ceaf9
AD
1276
1277 return result;
1278}
1279
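/*
 * Editor's note (illustrative): with e.g. 64K CPU pages and 4K GPU pages,
 * pages_addr[] holds one DMA address per CPU page; amdgpu_vm_map_gart()
 * adds the offset inside the CPU page and then masks the low 12 bits off
 * again, so the result is the 4K aligned address of the GPU page inside
 * that CPU page.
 */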
3c824172
HK
1280/**
1281 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
1282 *
1283 * @params: see amdgpu_pte_update_params definition
373ac645 1284 * @bo: PD/PT to update
3c824172
HK
1285 * @pe: kmap addr of the page entry
1286 * @addr: dst addr to write into pe
1287 * @count: number of page entries to update
1288 * @incr: increase next addr by incr bytes
1289 * @flags: hw access flags
1290 *
1291 * Write count number of PT/PD entries directly.
1292 */
1293static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
373ac645 1294 struct amdgpu_bo *bo,
3c824172
HK
1295 uint64_t pe, uint64_t addr,
1296 unsigned count, uint32_t incr,
1297 uint64_t flags)
1298{
1299 unsigned int i;
b4d42511 1300 uint64_t value;
3c824172 1301
373ac645
CK
1302 pe += (unsigned long)amdgpu_bo_kptr(bo);
1303
03918b36
CK
1304 trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
1305
3c824172 1306 for (i = 0; i < count; i++) {
b4d42511
HK
1307 value = params->pages_addr ?
1308 amdgpu_vm_map_gart(params->pages_addr, addr) :
1309 addr;
132f34e4
CK
1310 amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
1311 i, value, flags);
3c824172
HK
1312 addr += incr;
1313 }
3c824172
HK
1314}
1315
7fc48e59
AG
1316
1317/**
1318 * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
1319 *
1320 * @adev: amdgpu_device pointer
1321 * @vm: related vm
1322 * @owner: fence owner
1323 *
1324 * Returns:
1325 * 0 on success, errno otherwise.
1326 */
a33cab7a
CK
1327static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1328 void *owner)
3c824172
HK
1329{
1330 struct amdgpu_sync sync;
1331 int r;
1332
1333 amdgpu_sync_create(&sync);
177ae09b 1334 amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
3c824172
HK
1335 r = amdgpu_sync_wait(&sync, true);
1336 amdgpu_sync_free(&sync);
1337
1338 return r;
1339}
1340
1c860a02
CK
1341/**
1342 * amdgpu_vm_update_func - helper to call update function
1343 *
1344 * Calls the update function for both the given BO as well as its shadow.
1345 */
1346static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params,
1347 struct amdgpu_bo *bo,
1348 uint64_t pe, uint64_t addr,
1349 unsigned count, uint32_t incr,
1350 uint64_t flags)
1351{
1352 if (bo->shadow)
1353 params->func(params, bo->shadow, pe, addr, count, incr, flags);
1354 params->func(params, bo, pe, addr, count, incr, flags);
1355}
1356
f8991bab 1357/*
6989f246 1358 * amdgpu_vm_update_pde - update a single level in the hierarchy
f8991bab 1359 *
6989f246 1360 * @params: parameters for the update
f8991bab 1361 * @vm: requested vm
194d2161 1362 * @parent: parent directory
6989f246 1363 * @entry: entry to update
f8991bab 1364 *
6989f246 1365 * Makes sure the requested entry in parent is up to date.
f8991bab 1366 */
6989f246
CK
1367static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
1368 struct amdgpu_vm *vm,
1369 struct amdgpu_vm_pt *parent,
1370 struct amdgpu_vm_pt *entry)
d38ceaf9 1371{
373ac645 1372 struct amdgpu_bo *bo = parent->base.bo, *pbo;
3de676d8
CK
1373 uint64_t pde, pt, flags;
1374 unsigned level;
d5fc5e82 1375
6989f246
CK
1376 /* Don't update huge pages here */
1377 if (entry->huge)
1378 return;
d38ceaf9 1379
373ac645 1380 for (level = 0, pbo = bo->parent; pbo; ++level)
3de676d8
CK
1381 pbo = pbo->parent;
1382
196f7489 1383 level += params->adev->vm_manager.root_level;
24a8d289 1384 amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
373ac645 1385 pde = (entry - parent->entries) * 8;
1c860a02 1386 amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
d38ceaf9
AD
1387}
1388
92456b93
CK
1389/*
1390 * amdgpu_vm_invalidate_level - mark all PD levels as invalid
1391 *
7fc48e59
AG
1392 * @adev: amdgpu_device pointer
1393 * @vm: related vm
92456b93 1394 * @parent: parent PD
7fc48e59 1395 * @level: VMPT level
92456b93
CK
1396 *
1397 * Mark all PD levels as invalid after an error.
1398 */
8f19cd78
CK
1399static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
1400 struct amdgpu_vm *vm,
1401 struct amdgpu_vm_pt *parent,
1402 unsigned level)
92456b93 1403{
8f19cd78 1404 unsigned pt_idx, num_entries;
92456b93
CK
1405
1406 /*
1407 * Recurse into the subdirectories. This recursion is harmless because
1408 * we only have a maximum of 5 layers.
1409 */
8f19cd78
CK
1410 num_entries = amdgpu_vm_num_entries(adev, level);
1411 for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
92456b93
CK
1412 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1413
3f3333f8 1414 if (!entry->base.bo)
92456b93
CK
1415 continue;
1416
862b8c57 1417 if (!entry->base.moved)
bcdc9fd6 1418 amdgpu_vm_bo_relocated(&entry->base);
8f19cd78 1419 amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
92456b93
CK
1420 }
1421}
1422
194d2161
CK
1423/*
1424 * amdgpu_vm_update_directories - make sure that all directories are valid
1425 *
1426 * @adev: amdgpu_device pointer
1427 * @vm: requested vm
1428 *
1429 * Makes sure all directories are up to date.
7fc48e59
AG
1430 *
1431 * Returns:
1432 * 0 for success, error for failure.
194d2161
CK
1433 */
1434int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1435 struct amdgpu_vm *vm)
1436{
6989f246
CK
1437 struct amdgpu_pte_update_params params;
1438 struct amdgpu_job *job;
1439 unsigned ndw = 0;
78aa02c7 1440 int r = 0;
92456b93 1441
6989f246
CK
1442 if (list_empty(&vm->relocated))
1443 return 0;
1444
1445restart:
1446 memset(&params, 0, sizeof(params));
1447 params.adev = adev;
1448
1449 if (vm->use_cpu_for_update) {
1450 r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1451 if (unlikely(r))
1452 return r;
1453
1454 params.func = amdgpu_vm_cpu_set_ptes;
1455 } else {
1456 ndw = 512 * 8;
1457 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1458 if (r)
1459 return r;
1460
1461 params.ib = &job->ibs[0];
1462 params.func = amdgpu_vm_do_set_ptes;
1463 }
1464
ea09729c 1465 while (!list_empty(&vm->relocated)) {
6989f246 1466 struct amdgpu_vm_pt *pt, *entry;
ea09729c 1467
ba79fde4
CK
1468 entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
1469 base.vm_status);
1470 amdgpu_vm_bo_idle(&entry->base);
ea09729c 1471
ba79fde4
CK
1472 pt = amdgpu_vm_pt_parent(entry);
1473 if (!pt)
6989f246 1474 continue;
6989f246 1475
6989f246
CK
1476 amdgpu_vm_update_pde(&params, vm, pt, entry);
1477
6989f246
CK
1478 if (!vm->use_cpu_for_update &&
1479 (ndw - params.ib->length_dw) < 32)
1480 break;
ea09729c 1481 }
92456b93 1482
68c62306
CK
1483 if (vm->use_cpu_for_update) {
1484 /* Flush HDP */
1485 mb();
69882565 1486 amdgpu_asic_flush_hdp(adev, NULL);
6989f246
CK
1487 } else if (params.ib->length_dw == 0) {
1488 amdgpu_job_free(job);
1489 } else {
1490 struct amdgpu_bo *root = vm->root.base.bo;
1491 struct amdgpu_ring *ring;
1492 struct dma_fence *fence;
1493
068c3304 1494 ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
6989f246
CK
1495 sched);
1496
1497 amdgpu_ring_pad_ib(ring, params.ib);
1498 amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
1499 AMDGPU_FENCE_OWNER_VM, false);
6989f246 1500 WARN_ON(params.ib->length_dw > ndw);
0e28b10f
CK
1501 r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
1502 &fence);
6989f246
CK
1503 if (r)
1504 goto error;
1505
1506 amdgpu_bo_fence(root, fence, true);
1507 dma_fence_put(vm->last_update);
1508 vm->last_update = fence;
68c62306
CK
1509 }
1510
6989f246
CK
1511 if (!list_empty(&vm->relocated))
1512 goto restart;
1513
1514 return 0;
1515
1516error:
196f7489
CZ
1517 amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1518 adev->vm_manager.root_level);
6989f246 1519 amdgpu_job_free(job);
92456b93 1520 return r;
194d2161
CK
1521}
1522
4e2cb640 1523/**
cf2f0a37 1524 * amdgpu_vm_get_entry - find the entry for an address
4e2cb640
CK
1525 *
1526 * @p: see amdgpu_pte_update_params definition
1527 * @addr: virtual address in question
cf2f0a37
AD
1528 * @entry: resulting entry or NULL
1529 * @parent: parent entry
4e2cb640 1530 *
cf2f0a37 1531 * Find the vm_pt entry and its parent for the given address.
4e2cb640 1532 */
cf2f0a37
AD
1533void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1534 struct amdgpu_vm_pt **entry,
1535 struct amdgpu_vm_pt **parent)
4e2cb640 1536{
196f7489 1537 unsigned level = p->adev->vm_manager.root_level;
4e2cb640 1538
cf2f0a37
AD
1539 *parent = NULL;
1540 *entry = &p->vm->root;
1541 while ((*entry)->entries) {
e3a1b32a 1542 unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
50783147 1543
cf2f0a37 1544 *parent = *entry;
e3a1b32a
CK
1545 *entry = &(*entry)->entries[addr >> shift];
1546 addr &= (1ULL << shift) - 1;
4e2cb640
CK
1547 }
1548
196f7489 1549 if (level != AMDGPU_VM_PTB)
cf2f0a37
AD
1550 *entry = NULL;
1551}
1552
1553/**
1554 * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1555 *
1556 * @p: see amdgpu_pte_update_params definition
1557 * @entry: vm_pt entry to check
1558 * @parent: parent entry
1559 * @nptes: number of PTEs updated with this operation
1560 * @dst: destination address where the PTEs should point to
1561 * @flags: access flags for the PTEs
1562 *
1563 * Check if we can update the PD with a huge page.
1564 */
ec5207c9
CK
1565static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1566 struct amdgpu_vm_pt *entry,
1567 struct amdgpu_vm_pt *parent,
1568 unsigned nptes, uint64_t dst,
1569 uint64_t flags)
cf2f0a37 1570{
373ac645 1571 uint64_t pde;
cf2f0a37
AD
1572
1573 /* In the case of a mixed PT the PDE must point to it */
3cc1d3ea
CK
1574 if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1575 nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
4ab4016a 1576 /* Set the huge page flag to stop scanning at this PDE */
cf2f0a37
AD
1577 flags |= AMDGPU_PDE_PTE;
1578 }
1579
3cc1d3ea
CK
1580 if (!(flags & AMDGPU_PDE_PTE)) {
1581 if (entry->huge) {
1582 /* Add the entry to the relocated list to update it. */
1583 entry->huge = false;
bcdc9fd6 1584 amdgpu_vm_bo_relocated(&entry->base);
3cc1d3ea 1585 }
ec5207c9 1586 return;
3cc1d3ea 1587 }
cf2f0a37 1588
3cc1d3ea 1589 entry->huge = true;
132f34e4 1590 amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
3de676d8 1591
373ac645 1592 pde = (entry - parent->entries) * 8;
1c860a02 1593 amdgpu_vm_update_func(p, parent->base.bo, pde, dst, 1, 0, flags);
4e2cb640
CK
1594}
1595
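/*
 * Editor's note (illustrative): on Vega10 and later, when an update that
 * does not go through the GART copy path covers all AMDGPU_VM_PTE_COUNT()
 * entries of a page table (typically 512), the parent PDE is written with
 * AMDGPU_PDE_PTE set instead, i.e. the 4K pages are replaced by a single
 * 2MB huge page mapping.
 */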
d38ceaf9
AD
1596/**
1597 * amdgpu_vm_update_ptes - make sure that page tables are valid
1598 *
29efc4f5 1599 * @params: see amdgpu_pte_update_params definition
d38ceaf9
AD
1600 * @start: start of GPU address range
1601 * @end: end of GPU address range
677131a1 1602 * @dst: destination address to map to, the next dst inside the function
d38ceaf9
AD
1603 * @flags: mapping flags
1604 *
8843dbbb 1605 * Update the page tables in the range @start - @end.
7fc48e59
AG
1606 *
1607 * Returns:
1608 * 0 for success, -ENOENT for failure.
d38ceaf9 1609 */
cc28c4ed 1610static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
a1e08d3b 1611 uint64_t start, uint64_t end,
6b777607 1612 uint64_t dst, uint64_t flags)
d38ceaf9 1613{
36b32a68
ZJ
1614 struct amdgpu_device *adev = params->adev;
1615 const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
31f6c1fe 1616
301654a4 1617 uint64_t addr, pe_start;
21718497 1618 struct amdgpu_bo *pt;
301654a4 1619 unsigned nptes;
d38ceaf9
AD
1620
1621 /* walk over the address space and update the page tables */
cf2f0a37
AD
1622 for (addr = start; addr < end; addr += nptes,
1623 dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1624 struct amdgpu_vm_pt *entry, *parent;
1625
1626 amdgpu_vm_get_entry(params, addr, &entry, &parent);
1627 if (!entry)
1628 return -ENOENT;
4e2cb640 1629
d38ceaf9
AD
1630 if ((addr & ~mask) == (end & ~mask))
1631 nptes = end - addr;
1632 else
36b32a68 1633 nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
d38ceaf9 1634
ec5207c9
CK
1635 amdgpu_vm_handle_huge_pages(params, entry, parent,
1636 nptes, dst, flags);
4ab4016a 1637 /* We don't need to update PTEs for huge pages */
78eb2f0c 1638 if (entry->huge)
cf2f0a37
AD
1639 continue;
1640
3f3333f8 1641 pt = entry->base.bo;
373ac645 1642 pe_start = (addr & mask) * 8;
1c860a02
CK
1643 amdgpu_vm_update_func(params, pt, pe_start, dst, nptes,
1644 AMDGPU_GPU_PAGE_SIZE, flags);
1645
d38ceaf9
AD
1646 }
1647
cc28c4ed 1648 return 0;
92696dd5
CK
1649}
1650
1651/*
1652 * amdgpu_vm_frag_ptes - add fragment information to PTEs
1653 *
1654 * @params: see amdgpu_pte_update_params definition
1655 * @vm: requested vm
1656 * @start: first PTE to handle
1657 * @end: last PTE to handle
1658 * @dst: addr those PTEs should point to
1659 * @flags: hw mapping flags
7fc48e59
AG
1660 *
1661 * Returns:
1662 * 0 for success, -EINVAL for failure.
92696dd5 1663 */
cc28c4ed 1664static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
92696dd5 1665 uint64_t start, uint64_t end,
6b777607 1666 uint64_t dst, uint64_t flags)
92696dd5
CK
1667{
1668 /**
1669 * The MC L1 TLB supports variable sized pages, based on a fragment
1670 * field in the PTE. When this field is set to a non-zero value, page
1671 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1672 * flags are considered valid for all PTEs within the fragment range
1673 * and corresponding mappings are assumed to be physically contiguous.
1674 *
1675 * The L1 TLB can store a single PTE for the whole fragment,
1676 * significantly increasing the space available for translation
1677 * caching. This leads to large improvements in throughput when the
1678 * TLB is under pressure.
1679 *
1680 * The L2 TLB distributes small and large fragments into two
1681 * asymmetric partitions. The large fragment cache is significantly
1682 * larger. Thus, we try to use large fragments wherever possible.
1683 * Userspace can support this by aligning virtual base address and
1684 * allocation size to the fragment size.
1685 */
6849d47c
RH
1686 unsigned max_frag = params->adev->vm_manager.fragment_size;
1687 int r;
92696dd5
CK
1688
1689 /* system pages are not contiguous */
6849d47c 1690 if (params->src || !(flags & AMDGPU_PTE_VALID))
cc28c4ed 1691 return amdgpu_vm_update_ptes(params, start, end, dst, flags);
92696dd5 1692
6849d47c
RH
1693 while (start != end) {
1694 uint64_t frag_flags, frag_end;
1695 unsigned frag;
1696
1697 /* This intentionally wraps around if no bit is set */
1698 frag = min((unsigned)ffs(start) - 1,
1699 (unsigned)fls64(end - start) - 1);
1700 if (frag >= max_frag) {
1701 frag_flags = AMDGPU_PTE_FRAG(max_frag);
1702 frag_end = end & ~((1ULL << max_frag) - 1);
1703 } else {
1704 frag_flags = AMDGPU_PTE_FRAG(frag);
1705 frag_end = start + (1 << frag);
1706 }
1707
1708 r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1709 flags | frag_flags);
cc28c4ed
HK
1710 if (r)
1711 return r;
92696dd5 1712
6849d47c
RH
1713 dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
1714 start = frag_end;
92696dd5 1715 }
6849d47c
RH
1716
1717 return 0;
d38ceaf9
AD
1718}
1719
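/*
 * Editor's worked example (illustrative, assuming the default 9 bit
 * fragment_size): for start = 0x2000 and end = 0x2400 (GPU pfns),
 * ffs(start) - 1 = 13 and fls64(end - start) - 1 = 10, so frag = 10 is
 * clamped to max_frag = 9 and the whole range is written with
 * AMDGPU_PTE_FRAG(9) in a single amdgpu_vm_update_ptes() call.
 */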
d38ceaf9
AD
1720/**
1721 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1722 *
1723 * @adev: amdgpu_device pointer
3cabaa54 1724 * @exclusive: fence we need to sync to
fa3ab3c7 1725 * @pages_addr: DMA addresses to use for mapping
d38ceaf9 1726 * @vm: requested vm
a14faa65
CK
1727 * @start: start of mapped range
1728 * @last: last mapped entry
1729 * @flags: flags for the entries
d38ceaf9 1730 * @addr: addr to set the area to
d38ceaf9
AD
1731 * @fence: optional resulting fence
1732 *
a14faa65 1733 * Fill in the page table entries between @start and @last.
7fc48e59
AG
1734 *
1735 * Returns:
1736 * 0 for success, -EINVAL for failure.
d38ceaf9
AD
1737 */
1738static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
f54d1867 1739 struct dma_fence *exclusive,
fa3ab3c7 1740 dma_addr_t *pages_addr,
d38ceaf9 1741 struct amdgpu_vm *vm,
a14faa65 1742 uint64_t start, uint64_t last,
6b777607 1743 uint64_t flags, uint64_t addr,
f54d1867 1744 struct dma_fence **fence)
d38ceaf9 1745{
2d55e45a 1746 struct amdgpu_ring *ring;
a1e08d3b 1747 void *owner = AMDGPU_FENCE_OWNER_VM;
d38ceaf9 1748 unsigned nptes, ncmds, ndw;
d71518b5 1749 struct amdgpu_job *job;
29efc4f5 1750 struct amdgpu_pte_update_params params;
f54d1867 1751 struct dma_fence *f = NULL;
d38ceaf9
AD
1752 int r;
1753
afef8b8f
CK
1754 memset(&params, 0, sizeof(params));
1755 params.adev = adev;
49ac8a24 1756 params.vm = vm;
afef8b8f 1757
a33cab7a
CK
1758 /* sync to everything on unmapping */
1759 if (!(flags & AMDGPU_PTE_VALID))
1760 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1761
b4d42511
HK
1762 if (vm->use_cpu_for_update) {
1763 /* params.src is used as flag to indicate system Memory */
1764 if (pages_addr)
1765 params.src = ~0;
1766
1767 /* Wait for PT BOs to be free. PTs share the same resv. object
1768 * as the root PD BO
1769 */
a33cab7a 1770 r = amdgpu_vm_wait_pd(adev, vm, owner);
b4d42511
HK
1771 if (unlikely(r))
1772 return r;
1773
1774 params.func = amdgpu_vm_cpu_set_ptes;
1775 params.pages_addr = pages_addr;
b4d42511
HK
1776 return amdgpu_vm_frag_ptes(&params, start, last + 1,
1777 addr, flags);
1778 }
1779
068c3304 1780 ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
27c5f36f 1781
a14faa65 1782 nptes = last - start + 1;
d38ceaf9
AD
1783
1784 /*
86209523 1785 * reserve space for two commands every (1 << BLOCK_SIZE)
d38ceaf9 1786 * entries or 2k dwords (whatever is smaller)
86209523
BN
1787 *
1788 * The second command is for the shadow pagetables.
d38ceaf9 1789 */
104bd2ca
ED
1790 if (vm->root.base.bo->shadow)
1791 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1792 else
1793 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
d38ceaf9
AD
1794
1795 /* padding, etc. */
1796 ndw = 64;
1797
570144c6 1798 if (pages_addr) {
b0456f93 1799 /* copy commands needed */
e6d92197 1800 ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
d38ceaf9 1801
b0456f93 1802 /* and also PTEs */
d38ceaf9
AD
1803 ndw += nptes * 2;
1804
afef8b8f
CK
1805 params.func = amdgpu_vm_do_copy_ptes;
1806
d38ceaf9
AD
1807 } else {
1808 /* set page commands needed */
44e1baeb 1809 ndw += ncmds * 10;
d38ceaf9 1810
6849d47c 1811 /* extra commands for begin/end fragments */
11528640
ED
1812 if (vm->root.base.bo->shadow)
1813 ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
1814 else
1815 ndw += 2 * 10 * adev->vm_manager.fragment_size;
afef8b8f
CK
1816
1817 params.func = amdgpu_vm_do_set_ptes;
d38ceaf9
AD
1818 }
1819
d71518b5
CK
1820 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1821 if (r)
d38ceaf9 1822 return r;
d71518b5 1823
29efc4f5 1824 params.ib = &job->ibs[0];
d5fc5e82 1825
570144c6 1826 if (pages_addr) {
b0456f93
CK
1827 uint64_t *pte;
1828 unsigned i;
1829
1830 /* Put the PTEs at the end of the IB. */
1831 i = ndw - nptes * 2;
 1832	pte = (uint64_t *)&(job->ibs->ptr[i]);
1833 params.src = job->ibs->gpu_addr + i * 4;
1834
1835 for (i = 0; i < nptes; ++i) {
1836 pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1837 AMDGPU_GPU_PAGE_SIZE);
1838 pte[i] |= flags;
1839 }
d7a4ac66 1840 addr = 0;
b0456f93
CK
1841 }
1842
cebb52b7 1843 r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
3cabaa54
CK
1844 if (r)
1845 goto error_free;
1846
3f3333f8 1847 r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
177ae09b 1848 owner, false);
a1e08d3b
CK
1849 if (r)
1850 goto error_free;
d38ceaf9 1851
3f3333f8 1852 r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
a1e08d3b
CK
1853 if (r)
1854 goto error_free;
1855
cc28c4ed
HK
1856 r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1857 if (r)
1858 goto error_free;
d38ceaf9 1859
29efc4f5
CK
1860 amdgpu_ring_pad_ib(ring, params.ib);
1861 WARN_ON(params.ib->length_dw > ndw);
0e28b10f 1862 r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
4af9f07c
CZ
1863 if (r)
1864 goto error_free;
d38ceaf9 1865
3f3333f8 1866 amdgpu_bo_fence(vm->root.base.bo, f, true);
284710fa
CK
1867 dma_fence_put(*fence);
1868 *fence = f;
d38ceaf9 1869 return 0;
d5fc5e82
CZ
1870
1871error_free:
d71518b5 1872 amdgpu_job_free(job);
4af9f07c 1873 return r;
d38ceaf9
AD
1874}
1875
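
A rough feel for the dword budgeting above, as a standalone sketch. The constants mirror the ones used in the function, but the inputs (block_size 9, fragment_size 9, a shadow PD, the set-pages path, 64K PTEs) are assumed values.

#include <stdio.h>

int main(void)
{
	unsigned block_size = 9, fragment_size = 9;
	unsigned nptes = 1 << 16;		/* 64K page table entries */
	unsigned ncmds, ndw;

	/* two commands per (1 << block_size) entries: one for the page
	 * table, one for its shadow */
	ncmds = ((nptes >> (block_size < 11 ? block_size : 11)) + 1) * 2;

	ndw = 64;				/* padding, etc. */
	ndw += ncmds * 10;			/* set-page commands */
	ndw += 2 * 10 * fragment_size * 2;	/* begin/end fragments, shadow */

	printf("ncmds=%u, ndw=%u (~%u KiB IB)\n", ncmds, ndw, ndw * 4 / 1024);
	return 0;
}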
a14faa65
CK
1876/**
1877 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1878 *
1879 * @adev: amdgpu_device pointer
3cabaa54 1880 * @exclusive: fence we need to sync to
8358dcee 1881 * @pages_addr: DMA addresses to use for mapping
a14faa65
CK
1882 * @vm: requested vm
1883 * @mapping: mapped range and flags to use for the update
8358dcee 1884 * @flags: HW flags for the mapping
63e0ba40 1885 * @nodes: array of drm_mm_nodes with the MC addresses
a14faa65
CK
1886 * @fence: optional resulting fence
1887 *
1888 * Split the mapping into smaller chunks so that each update fits
 1889 * into an SDMA IB.
7fc48e59
AG
1890 *
1891 * Returns:
1892 * 0 for success, -EINVAL for failure.
a14faa65
CK
1893 */
1894static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
f54d1867 1895 struct dma_fence *exclusive,
8358dcee 1896 dma_addr_t *pages_addr,
a14faa65
CK
1897 struct amdgpu_vm *vm,
1898 struct amdgpu_bo_va_mapping *mapping,
6b777607 1899 uint64_t flags,
63e0ba40 1900 struct drm_mm_node *nodes,
f54d1867 1901 struct dma_fence **fence)
a14faa65 1902{
9fc8fc70 1903 unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
570144c6 1904 uint64_t pfn, start = mapping->start;
a14faa65
CK
1905 int r;
1906
 1907	/* Normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
 1908	 * but to be safe we filter the flags here anyway
 1909	 */
1910 if (!(mapping->flags & AMDGPU_PTE_READABLE))
1911 flags &= ~AMDGPU_PTE_READABLE;
1912 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1913 flags &= ~AMDGPU_PTE_WRITEABLE;
1914
15b31c59
AX
1915 flags &= ~AMDGPU_PTE_EXECUTABLE;
1916 flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1917
b0fd18b0
AX
1918 flags &= ~AMDGPU_PTE_MTYPE_MASK;
1919 flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1920
d0766e98
ZJ
1921 if ((mapping->flags & AMDGPU_PTE_PRT) &&
1922 (adev->asic_type >= CHIP_VEGA10)) {
1923 flags |= AMDGPU_PTE_PRT;
1924 flags &= ~AMDGPU_PTE_VALID;
1925 }
1926
a14faa65
CK
1927 trace_amdgpu_vm_bo_update(mapping);
1928
63e0ba40
CK
1929 pfn = mapping->offset >> PAGE_SHIFT;
1930 if (nodes) {
1931 while (pfn >= nodes->size) {
1932 pfn -= nodes->size;
1933 ++nodes;
1934 }
fa3ab3c7 1935 }
a14faa65 1936
63e0ba40 1937 do {
9fc8fc70 1938 dma_addr_t *dma_addr = NULL;
63e0ba40
CK
1939 uint64_t max_entries;
1940 uint64_t addr, last;
a14faa65 1941
63e0ba40
CK
1942 if (nodes) {
1943 addr = nodes->start << PAGE_SHIFT;
1944 max_entries = (nodes->size - pfn) *
463d2fe8 1945 AMDGPU_GPU_PAGES_IN_CPU_PAGE;
63e0ba40
CK
1946 } else {
1947 addr = 0;
1948 max_entries = S64_MAX;
1949 }
a14faa65 1950
63e0ba40 1951 if (pages_addr) {
9fc8fc70
CK
1952 uint64_t count;
1953
457e0fee 1954 max_entries = min(max_entries, 16ull * 1024ull);
38e624a1 1955 for (count = 1;
463d2fe8 1956 count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
38e624a1 1957 ++count) {
9fc8fc70
CK
1958 uint64_t idx = pfn + count;
1959
1960 if (pages_addr[idx] !=
1961 (pages_addr[idx - 1] + PAGE_SIZE))
1962 break;
1963 }
1964
1965 if (count < min_linear_pages) {
1966 addr = pfn << PAGE_SHIFT;
1967 dma_addr = pages_addr;
1968 } else {
1969 addr = pages_addr[pfn];
463d2fe8 1970 max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
9fc8fc70
CK
1971 }
1972
63e0ba40
CK
1973 } else if (flags & AMDGPU_PTE_VALID) {
1974 addr += adev->vm_manager.vram_base_offset;
9fc8fc70 1975 addr += pfn << PAGE_SHIFT;
63e0ba40 1976 }
63e0ba40 1977
a9f87f64 1978 last = min((uint64_t)mapping->last, start + max_entries - 1);
9fc8fc70 1979 r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
a14faa65
CK
1980 start, last, flags, addr,
1981 fence);
1982 if (r)
1983 return r;
1984
463d2fe8 1985 pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
63e0ba40
CK
1986 if (nodes && nodes->size == pfn) {
1987 pfn = 0;
1988 ++nodes;
1989 }
a14faa65 1990 start = last + 1;
63e0ba40 1991
a9f87f64 1992 } while (unlikely(start != mapping->last + 1));
a14faa65
CK
1993
1994 return 0;
1995}
1996
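
The pages_addr path above only maps a run linearly when enough CPU pages are physically adjacent. A minimal standalone sketch of that contiguity scan follows; the DMA address array and the 4 KiB page size are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

static uint64_t count_contiguous(const uint64_t *pages_addr, uint64_t pfn,
				 uint64_t max_entries)
{
	uint64_t count;

	for (count = 1; count < max_entries; ++count) {
		uint64_t idx = pfn + count;

		if (pages_addr[idx] != pages_addr[idx - 1] + PAGE_SIZE)
			break;
	}
	return count;
}

int main(void)
{
	/* three adjacent pages followed by a gap */
	uint64_t pages[] = { 0x1000, 0x2000, 0x3000, 0x9000 };

	printf("contiguous run: %llu pages\n",
	       (unsigned long long)count_contiguous(pages, 0, 4));
	return 0;
}

If the run is shorter than min_linear_pages, the driver falls back to per-page GART copies instead of a linear mapping.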
d38ceaf9
AD
1997/**
1998 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1999 *
2000 * @adev: amdgpu_device pointer
2001 * @bo_va: requested BO and VM object
99e124f4 2002 * @clear: if true clear the entries
d38ceaf9
AD
2003 *
2004 * Fill in the page table entries for @bo_va.
7fc48e59
AG
2005 *
2006 * Returns:
2007 * 0 for success, -EINVAL for failure.
d38ceaf9
AD
2008 */
2009int amdgpu_vm_bo_update(struct amdgpu_device *adev,
2010 struct amdgpu_bo_va *bo_va,
99e124f4 2011 bool clear)
d38ceaf9 2012{
ec681545
CK
2013 struct amdgpu_bo *bo = bo_va->base.bo;
2014 struct amdgpu_vm *vm = bo_va->base.vm;
d38ceaf9 2015 struct amdgpu_bo_va_mapping *mapping;
8358dcee 2016 dma_addr_t *pages_addr = NULL;
99e124f4 2017 struct ttm_mem_reg *mem;
63e0ba40 2018 struct drm_mm_node *nodes;
4e55eb38 2019 struct dma_fence *exclusive, **last_update;
457e0fee 2020 uint64_t flags;
d38ceaf9
AD
2021 int r;
2022
7eb80427 2023 if (clear || !bo) {
99e124f4 2024 mem = NULL;
63e0ba40 2025 nodes = NULL;
99e124f4
CK
2026 exclusive = NULL;
2027 } else {
8358dcee
CK
2028 struct ttm_dma_tt *ttm;
2029
7eb80427 2030 mem = &bo->tbo.mem;
63e0ba40
CK
2031 nodes = mem->mm_node;
2032 if (mem->mem_type == TTM_PL_TT) {
7eb80427 2033 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
8358dcee 2034 pages_addr = ttm->dma_address;
9ab21462 2035 }
ec681545 2036 exclusive = reservation_object_get_excl(bo->tbo.resv);
d38ceaf9
AD
2037 }
2038
457e0fee 2039 if (bo)
ec681545 2040 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
457e0fee 2041 else
a5f6b5b1 2042 flags = 0x0;
d38ceaf9 2043
4e55eb38
CK
2044 if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
2045 last_update = &vm->last_update;
2046 else
2047 last_update = &bo_va->last_pt_update;
2048
3d7d4d3a
CK
2049 if (!clear && bo_va->base.moved) {
2050 bo_va->base.moved = false;
7fc11959 2051 list_splice_init(&bo_va->valids, &bo_va->invalids);
3d7d4d3a 2052
cb7b6ec2
CK
2053 } else if (bo_va->cleared != clear) {
2054 list_splice_init(&bo_va->valids, &bo_va->invalids);
3d7d4d3a 2055 }
7fc11959
CK
2056
2057 list_for_each_entry(mapping, &bo_va->invalids, list) {
457e0fee 2058 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
63e0ba40 2059 mapping, flags, nodes,
4e55eb38 2060 last_update);
d38ceaf9
AD
2061 if (r)
2062 return r;
2063 }
2064
cb7b6ec2
CK
2065 if (vm->use_cpu_for_update) {
2066 /* Flush HDP */
2067 mb();
69882565 2068 amdgpu_asic_flush_hdp(adev, NULL);
d6c10f6b
CK
2069 }
2070
bb475839
JZ
2071 /* If the BO is not in its preferred location add it back to
2072 * the evicted list so that it gets validated again on the
2073 * next command submission.
2074 */
806f043f
CK
2075 if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2076 uint32_t mem_type = bo->tbo.mem.mem_type;
2077
2078 if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
bcdc9fd6 2079 amdgpu_vm_bo_evicted(&bo_va->base);
806f043f 2080 else
bcdc9fd6 2081 amdgpu_vm_bo_idle(&bo_va->base);
c12a2ee5 2082 } else {
bcdc9fd6 2083 amdgpu_vm_bo_done(&bo_va->base);
806f043f 2084 }
d38ceaf9 2085
cb7b6ec2
CK
2086 list_splice_init(&bo_va->invalids, &bo_va->valids);
2087 bo_va->cleared = clear;
2088
2089 if (trace_amdgpu_vm_bo_mapping_enabled()) {
2090 list_for_each_entry(mapping, &bo_va->valids, list)
2091 trace_amdgpu_vm_bo_mapping(mapping);
68c62306
CK
2092 }
2093
d38ceaf9
AD
2094 return 0;
2095}
2096
284710fa
CK
2097/**
2098 * amdgpu_vm_update_prt_state - update the global PRT state
7fc48e59
AG
2099 *
2100 * @adev: amdgpu_device pointer
284710fa
CK
2101 */
2102static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
2103{
2104 unsigned long flags;
2105 bool enable;
2106
2107 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
451bc8eb 2108 enable = !!atomic_read(&adev->vm_manager.num_prt_users);
132f34e4 2109 adev->gmc.gmc_funcs->set_prt(adev, enable);
284710fa
CK
2110 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
2111}
2112
451bc8eb 2113/**
4388fc2a 2114 * amdgpu_vm_prt_get - add a PRT user
7fc48e59
AG
2115 *
2116 * @adev: amdgpu_device pointer
451bc8eb
CK
2117 */
2118static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
2119{
132f34e4 2120 if (!adev->gmc.gmc_funcs->set_prt)
4388fc2a
CK
2121 return;
2122
451bc8eb
CK
2123 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
2124 amdgpu_vm_update_prt_state(adev);
2125}
2126
0b15f2fc
CK
2127/**
2128 * amdgpu_vm_prt_put - drop a PRT user
7fc48e59
AG
2129 *
2130 * @adev: amdgpu_device pointer
0b15f2fc
CK
2131 */
2132static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
2133{
451bc8eb 2134 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
0b15f2fc
CK
2135 amdgpu_vm_update_prt_state(adev);
2136}
2137
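
The two helpers above implement a first-user/last-user toggle: the hardware PRT state only changes when the user count crosses 0->1 or 1->0. A simplified standalone sketch of that pattern with C11 atomics; the kernel version additionally takes prt_lock and re-reads the counter inside amdgpu_vm_update_prt_state.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int num_prt_users;

static void set_prt(bool enable)
{
	printf("PRT support %s\n", enable ? "enabled" : "disabled");
}

static void prt_get(void)
{
	if (atomic_fetch_add(&num_prt_users, 1) == 0)
		set_prt(true);		/* first user switches it on */
}

static void prt_put(void)
{
	if (atomic_fetch_sub(&num_prt_users, 1) == 1)
		set_prt(false);		/* last user switches it off */
}

int main(void)
{
	prt_get();	/* enables */
	prt_get();	/* no state change */
	prt_put();	/* no state change */
	prt_put();	/* disables */
	return 0;
}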
284710fa 2138/**
451bc8eb 2139 * amdgpu_vm_prt_cb - callback for updating the PRT status
7fc48e59
AG
2140 *
2141 * @fence: fence for the callback
00553cf8 2142 * @_cb: the callback function
284710fa
CK
2143 */
2144static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
2145{
2146 struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
2147
0b15f2fc 2148 amdgpu_vm_prt_put(cb->adev);
284710fa
CK
2149 kfree(cb);
2150}
2151
451bc8eb
CK
2152/**
2153 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
7fc48e59
AG
2154 *
2155 * @adev: amdgpu_device pointer
2156 * @fence: fence for the callback
451bc8eb
CK
2157 */
2158static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
2159 struct dma_fence *fence)
2160{
4388fc2a 2161 struct amdgpu_prt_cb *cb;
451bc8eb 2162
132f34e4 2163 if (!adev->gmc.gmc_funcs->set_prt)
4388fc2a
CK
2164 return;
2165
2166 cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
451bc8eb
CK
2167 if (!cb) {
2168 /* Last resort when we are OOM */
2169 if (fence)
2170 dma_fence_wait(fence, false);
2171
486a68f5 2172 amdgpu_vm_prt_put(adev);
451bc8eb
CK
2173 } else {
2174 cb->adev = adev;
2175 if (!fence || dma_fence_add_callback(fence, &cb->cb,
2176 amdgpu_vm_prt_cb))
2177 amdgpu_vm_prt_cb(fence, &cb->cb);
2178 }
2179}
2180
284710fa
CK
2181/**
2182 * amdgpu_vm_free_mapping - free a mapping
2183 *
2184 * @adev: amdgpu_device pointer
2185 * @vm: requested vm
2186 * @mapping: mapping to be freed
2187 * @fence: fence of the unmap operation
2188 *
2189 * Free a mapping and make sure we decrease the PRT usage count if applicable.
2190 */
2191static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
2192 struct amdgpu_vm *vm,
2193 struct amdgpu_bo_va_mapping *mapping,
2194 struct dma_fence *fence)
2195{
451bc8eb
CK
2196 if (mapping->flags & AMDGPU_PTE_PRT)
2197 amdgpu_vm_add_prt_cb(adev, fence);
2198 kfree(mapping);
2199}
284710fa 2200
451bc8eb
CK
2201/**
2202 * amdgpu_vm_prt_fini - finish all prt mappings
2203 *
2204 * @adev: amdgpu_device pointer
2205 * @vm: requested vm
2206 *
2207 * Register a cleanup callback to disable PRT support after VM dies.
2208 */
2209static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2210{
3f3333f8 2211 struct reservation_object *resv = vm->root.base.bo->tbo.resv;
451bc8eb
CK
2212 struct dma_fence *excl, **shared;
2213 unsigned i, shared_count;
2214 int r;
0b15f2fc 2215
451bc8eb
CK
2216 r = reservation_object_get_fences_rcu(resv, &excl,
2217 &shared_count, &shared);
2218 if (r) {
2219 /* Not enough memory to grab the fence list, as last resort
2220 * block for all the fences to complete.
2221 */
2222 reservation_object_wait_timeout_rcu(resv, true, false,
2223 MAX_SCHEDULE_TIMEOUT);
2224 return;
284710fa 2225 }
451bc8eb
CK
2226
2227 /* Add a callback for each fence in the reservation object */
2228 amdgpu_vm_prt_get(adev);
2229 amdgpu_vm_add_prt_cb(adev, excl);
2230
2231 for (i = 0; i < shared_count; ++i) {
2232 amdgpu_vm_prt_get(adev);
2233 amdgpu_vm_add_prt_cb(adev, shared[i]);
2234 }
2235
2236 kfree(shared);
284710fa
CK
2237}
2238
d38ceaf9
AD
2239/**
2240 * amdgpu_vm_clear_freed - clear freed BOs in the PT
2241 *
2242 * @adev: amdgpu_device pointer
2243 * @vm: requested vm
f3467818
NH
2244 * @fence: optional resulting fence (unchanged if no work needed to be done
2245 * or if an error occurred)
d38ceaf9
AD
2246 *
2247 * Make sure all freed BOs are cleared in the PT.
d38ceaf9 2248 * PTs have to be reserved and mutex must be locked!
7fc48e59
AG
2249 *
2250 * Returns:
2251 * 0 for success.
2252 *
d38ceaf9
AD
2253 */
2254int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
f3467818
NH
2255 struct amdgpu_vm *vm,
2256 struct dma_fence **fence)
d38ceaf9
AD
2257{
2258 struct amdgpu_bo_va_mapping *mapping;
4584312d 2259 uint64_t init_pte_value = 0;
f3467818 2260 struct dma_fence *f = NULL;
d38ceaf9
AD
2261 int r;
2262
2263 while (!list_empty(&vm->freed)) {
2264 mapping = list_first_entry(&vm->freed,
2265 struct amdgpu_bo_va_mapping, list);
2266 list_del(&mapping->list);
e17841b9 2267
ad9a5b78
CK
2268 if (vm->pte_support_ats &&
2269 mapping->start < AMDGPU_GMC_HOLE_START)
6d16dac8 2270 init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
51ac7eec 2271
570144c6 2272 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
fc6aa33d 2273 mapping->start, mapping->last,
51ac7eec 2274 init_pte_value, 0, &f);
f3467818 2275 amdgpu_vm_free_mapping(adev, vm, mapping, f);
284710fa 2276 if (r) {
f3467818 2277 dma_fence_put(f);
d38ceaf9 2278 return r;
284710fa 2279 }
f3467818 2280 }
d38ceaf9 2281
f3467818
NH
2282 if (fence && f) {
2283 dma_fence_put(*fence);
2284 *fence = f;
2285 } else {
2286 dma_fence_put(f);
d38ceaf9 2287 }
f3467818 2288
d38ceaf9
AD
2289 return 0;
2290
2291}
2292
2293/**
73fb16e7 2294 * amdgpu_vm_handle_moved - handle moved BOs in the PT
d38ceaf9
AD
2295 *
2296 * @adev: amdgpu_device pointer
2297 * @vm: requested vm
2298 *
73fb16e7 2299 * Make sure all BOs which are moved are updated in the PTs.
7fc48e59
AG
2300 *
2301 * Returns:
2302 * 0 for success.
d38ceaf9 2303 *
73fb16e7 2304 * PTs have to be reserved!
d38ceaf9 2305 */
73fb16e7 2306int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
4e55eb38 2307 struct amdgpu_vm *vm)
d38ceaf9 2308{
789f3317 2309 struct amdgpu_bo_va *bo_va, *tmp;
c12a2ee5 2310 struct reservation_object *resv;
73fb16e7 2311 bool clear;
789f3317 2312 int r;
d38ceaf9 2313
c12a2ee5
CK
2314 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
 2315		/* Per VM BOs never need to be cleared in the page tables */
2316 r = amdgpu_vm_bo_update(adev, bo_va, false);
2317 if (r)
2318 return r;
2319 }
32b41ac2 2320
c12a2ee5
CK
2321 spin_lock(&vm->invalidated_lock);
2322 while (!list_empty(&vm->invalidated)) {
2323 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
2324 base.vm_status);
2325 resv = bo_va->base.bo->tbo.resv;
2326 spin_unlock(&vm->invalidated_lock);
ec363e0d 2327
ec363e0d 2328 /* Try to reserve the BO to avoid clearing its ptes */
c12a2ee5 2329 if (!amdgpu_vm_debug && reservation_object_trylock(resv))
ec363e0d
CK
2330 clear = false;
2331 /* Somebody else is using the BO right now */
2332 else
2333 clear = true;
73fb16e7
CK
2334
2335 r = amdgpu_vm_bo_update(adev, bo_va, clear);
c12a2ee5 2336 if (r)
d38ceaf9
AD
2337 return r;
2338
c12a2ee5 2339 if (!clear)
ec363e0d 2340 reservation_object_unlock(resv);
c12a2ee5 2341 spin_lock(&vm->invalidated_lock);
d38ceaf9 2342 }
c12a2ee5 2343 spin_unlock(&vm->invalidated_lock);
d38ceaf9 2344
789f3317 2345 return 0;
d38ceaf9
AD
2346}
2347
2348/**
2349 * amdgpu_vm_bo_add - add a bo to a specific vm
2350 *
2351 * @adev: amdgpu_device pointer
2352 * @vm: requested vm
2353 * @bo: amdgpu buffer object
2354 *
8843dbbb 2355 * Add @bo into the requested vm.
d38ceaf9 2356 * Add @bo to the list of bos associated with the vm
7fc48e59
AG
2357 *
2358 * Returns:
2359 * Newly added bo_va or NULL for failure
d38ceaf9
AD
2360 *
2361 * Object has to be reserved!
2362 */
2363struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2364 struct amdgpu_vm *vm,
2365 struct amdgpu_bo *bo)
2366{
2367 struct amdgpu_bo_va *bo_va;
2368
2369 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2370 if (bo_va == NULL) {
2371 return NULL;
2372 }
3f4299be 2373 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
ec681545 2374
d38ceaf9 2375 bo_va->ref_count = 1;
7fc11959
CK
2376 INIT_LIST_HEAD(&bo_va->valids);
2377 INIT_LIST_HEAD(&bo_va->invalids);
32b41ac2 2378
d38ceaf9
AD
2379 return bo_va;
2380}
2381
73fb16e7
CK
2382
2383/**
2384 * amdgpu_vm_bo_insert_mapping - insert a new mapping
2385 *
2386 * @adev: amdgpu_device pointer
2387 * @bo_va: bo_va to store the address
2388 * @mapping: the mapping to insert
2389 *
2390 * Insert a new mapping into all structures.
2391 */
2392static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2393 struct amdgpu_bo_va *bo_va,
2394 struct amdgpu_bo_va_mapping *mapping)
2395{
2396 struct amdgpu_vm *vm = bo_va->base.vm;
2397 struct amdgpu_bo *bo = bo_va->base.bo;
2398
aebc5e6f 2399 mapping->bo_va = bo_va;
73fb16e7
CK
2400 list_add(&mapping->list, &bo_va->invalids);
2401 amdgpu_vm_it_insert(mapping, &vm->va);
2402
2403 if (mapping->flags & AMDGPU_PTE_PRT)
2404 amdgpu_vm_prt_get(adev);
2405
862b8c57
CK
2406 if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
2407 !bo_va->base.moved) {
862b8c57 2408 list_move(&bo_va->base.vm_status, &vm->moved);
73fb16e7
CK
2409 }
2410 trace_amdgpu_vm_bo_map(bo_va, mapping);
2411}
2412
d38ceaf9
AD
2413/**
2414 * amdgpu_vm_bo_map - map bo inside a vm
2415 *
2416 * @adev: amdgpu_device pointer
2417 * @bo_va: bo_va to store the address
2418 * @saddr: where to map the BO
2419 * @offset: requested offset in the BO
00553cf8 2420 * @size: BO size in bytes
d38ceaf9
AD
2421 * @flags: attributes of pages (read/write/valid/etc.)
2422 *
 2423 * Add a mapping of the BO at the specified addr into the VM.
7fc48e59
AG
2424 *
2425 * Returns:
2426 * 0 for success, error for failure.
d38ceaf9 2427 *
49b02b18 2428 * Object has to be reserved and unreserved outside!
d38ceaf9
AD
2429 */
2430int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2431 struct amdgpu_bo_va *bo_va,
2432 uint64_t saddr, uint64_t offset,
268c3001 2433 uint64_t size, uint64_t flags)
d38ceaf9 2434{
a9f87f64 2435 struct amdgpu_bo_va_mapping *mapping, *tmp;
ec681545
CK
2436 struct amdgpu_bo *bo = bo_va->base.bo;
2437 struct amdgpu_vm *vm = bo_va->base.vm;
d38ceaf9 2438 uint64_t eaddr;
d38ceaf9 2439
0be52de9
CK
2440 /* validate the parameters */
2441 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
49b02b18 2442 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
0be52de9 2443 return -EINVAL;
0be52de9 2444
d38ceaf9 2445 /* make sure object fit at this offset */
005ae95e 2446 eaddr = saddr + size - 1;
a5f6b5b1 2447 if (saddr >= eaddr ||
ec681545 2448 (bo && offset + size > amdgpu_bo_size(bo)))
d38ceaf9 2449 return -EINVAL;
d38ceaf9 2450
d38ceaf9
AD
2451 saddr /= AMDGPU_GPU_PAGE_SIZE;
2452 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2453
a9f87f64
CK
2454 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2455 if (tmp) {
d38ceaf9
AD
2456 /* bo and tmp overlap, invalid addr */
2457 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
ec681545 2458 "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
a9f87f64 2459 tmp->start, tmp->last + 1);
663e4577 2460 return -EINVAL;
d38ceaf9
AD
2461 }
2462
2463 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
663e4577
CK
2464 if (!mapping)
2465 return -ENOMEM;
d38ceaf9 2466
a9f87f64
CK
2467 mapping->start = saddr;
2468 mapping->last = eaddr;
d38ceaf9
AD
2469 mapping->offset = offset;
2470 mapping->flags = flags;
2471
73fb16e7 2472 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
80f95c57
CK
2473
2474 return 0;
2475}
2476
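
For reference, a standalone sketch of the validation and page conversion performed above. All values (VA, offset, size, BO size) are made up, and returning 1 stands in for -EINVAL.

#include <stdio.h>
#include <stdint.h>

#define AMDGPU_GPU_PAGE_SIZE 4096ULL
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)

int main(void)
{
	uint64_t saddr = 0x100000000ULL;	/* 4 GiB VA (hypothetical) */
	uint64_t offset = 0;			/* offset inside the BO */
	uint64_t size = 2ULL << 20;		/* 2 MiB */
	uint64_t bo_size = 4ULL << 20;		/* hypothetical BO size */

	if ((saddr & AMDGPU_GPU_PAGE_MASK) || (offset & AMDGPU_GPU_PAGE_MASK) ||
	    size == 0 || (size & AMDGPU_GPU_PAGE_MASK))
		return 1;			/* -EINVAL in the driver */

	uint64_t eaddr = saddr + size - 1;
	if (saddr >= eaddr || offset + size > bo_size)
		return 1;

	printf("mapping GPU pages 0x%llx-0x%llx\n",
	       (unsigned long long)(saddr / AMDGPU_GPU_PAGE_SIZE),
	       (unsigned long long)(eaddr / AMDGPU_GPU_PAGE_SIZE));
	return 0;
}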
2477/**
2478 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2479 *
2480 * @adev: amdgpu_device pointer
2481 * @bo_va: bo_va to store the address
2482 * @saddr: where to map the BO
2483 * @offset: requested offset in the BO
00553cf8 2484 * @size: BO size in bytes
80f95c57
CK
2485 * @flags: attributes of pages (read/write/valid/etc.)
2486 *
 2487 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2488 * mappings as we do so.
7fc48e59
AG
2489 *
2490 * Returns:
2491 * 0 for success, error for failure.
80f95c57
CK
2492 *
2493 * Object has to be reserved and unreserved outside!
2494 */
2495int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2496 struct amdgpu_bo_va *bo_va,
2497 uint64_t saddr, uint64_t offset,
2498 uint64_t size, uint64_t flags)
2499{
2500 struct amdgpu_bo_va_mapping *mapping;
ec681545 2501 struct amdgpu_bo *bo = bo_va->base.bo;
80f95c57
CK
2502 uint64_t eaddr;
2503 int r;
2504
2505 /* validate the parameters */
2506 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2507 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2508 return -EINVAL;
2509
2510 /* make sure object fit at this offset */
2511 eaddr = saddr + size - 1;
2512 if (saddr >= eaddr ||
ec681545 2513 (bo && offset + size > amdgpu_bo_size(bo)))
80f95c57
CK
2514 return -EINVAL;
2515
2516 /* Allocate all the needed memory */
2517 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2518 if (!mapping)
2519 return -ENOMEM;
2520
ec681545 2521 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
80f95c57
CK
2522 if (r) {
2523 kfree(mapping);
2524 return r;
2525 }
2526
2527 saddr /= AMDGPU_GPU_PAGE_SIZE;
2528 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2529
a9f87f64
CK
2530 mapping->start = saddr;
2531 mapping->last = eaddr;
80f95c57
CK
2532 mapping->offset = offset;
2533 mapping->flags = flags;
2534
73fb16e7 2535 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
4388fc2a 2536
d38ceaf9 2537 return 0;
d38ceaf9
AD
2538}
2539
2540/**
2541 * amdgpu_vm_bo_unmap - remove bo mapping from vm
2542 *
2543 * @adev: amdgpu_device pointer
2544 * @bo_va: bo_va to remove the address from
 2546 * @saddr: where the BO is mapped
 2547 *
 2548 * Remove a mapping of the BO at the specified addr from the VM.
7fc48e59
AG
2548 *
2549 * Returns:
2550 * 0 for success, error for failure.
d38ceaf9 2551 *
49b02b18 2552 * Object has to be reserved and unreserved outside!
d38ceaf9
AD
2553 */
2554int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2555 struct amdgpu_bo_va *bo_va,
2556 uint64_t saddr)
2557{
2558 struct amdgpu_bo_va_mapping *mapping;
ec681545 2559 struct amdgpu_vm *vm = bo_va->base.vm;
7fc11959 2560 bool valid = true;
d38ceaf9 2561
6c7fc503 2562 saddr /= AMDGPU_GPU_PAGE_SIZE;
32b41ac2 2563
7fc11959 2564 list_for_each_entry(mapping, &bo_va->valids, list) {
a9f87f64 2565 if (mapping->start == saddr)
d38ceaf9
AD
2566 break;
2567 }
2568
7fc11959
CK
2569 if (&mapping->list == &bo_va->valids) {
2570 valid = false;
2571
2572 list_for_each_entry(mapping, &bo_va->invalids, list) {
a9f87f64 2573 if (mapping->start == saddr)
7fc11959
CK
2574 break;
2575 }
2576
32b41ac2 2577 if (&mapping->list == &bo_va->invalids)
7fc11959 2578 return -ENOENT;
d38ceaf9 2579 }
32b41ac2 2580
d38ceaf9 2581 list_del(&mapping->list);
a9f87f64 2582 amdgpu_vm_it_remove(mapping, &vm->va);
aebc5e6f 2583 mapping->bo_va = NULL;
93e3e438 2584 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
d38ceaf9 2585
e17841b9 2586 if (valid)
d38ceaf9 2587 list_add(&mapping->list, &vm->freed);
e17841b9 2588 else
284710fa
CK
2589 amdgpu_vm_free_mapping(adev, vm, mapping,
2590 bo_va->last_pt_update);
d38ceaf9
AD
2591
2592 return 0;
2593}
2594
dc54d3d1
CK
2595/**
2596 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2597 *
2598 * @adev: amdgpu_device pointer
2599 * @vm: VM structure to use
2600 * @saddr: start of the range
2601 * @size: size of the range
2602 *
2603 * Remove all mappings in a range, split them as appropriate.
7fc48e59
AG
2604 *
2605 * Returns:
2606 * 0 for success, error for failure.
dc54d3d1
CK
2607 */
2608int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2609 struct amdgpu_vm *vm,
2610 uint64_t saddr, uint64_t size)
2611{
2612 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
dc54d3d1
CK
2613 LIST_HEAD(removed);
2614 uint64_t eaddr;
2615
2616 eaddr = saddr + size - 1;
2617 saddr /= AMDGPU_GPU_PAGE_SIZE;
2618 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2619
2620 /* Allocate all the needed memory */
2621 before = kzalloc(sizeof(*before), GFP_KERNEL);
2622 if (!before)
2623 return -ENOMEM;
27f6d610 2624 INIT_LIST_HEAD(&before->list);
dc54d3d1
CK
2625
2626 after = kzalloc(sizeof(*after), GFP_KERNEL);
2627 if (!after) {
2628 kfree(before);
2629 return -ENOMEM;
2630 }
27f6d610 2631 INIT_LIST_HEAD(&after->list);
dc54d3d1
CK
2632
2633 /* Now gather all removed mappings */
a9f87f64
CK
2634 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2635 while (tmp) {
dc54d3d1 2636 /* Remember mapping split at the start */
a9f87f64
CK
2637 if (tmp->start < saddr) {
2638 before->start = tmp->start;
2639 before->last = saddr - 1;
dc54d3d1
CK
2640 before->offset = tmp->offset;
2641 before->flags = tmp->flags;
387f49e5
JZ
2642 before->bo_va = tmp->bo_va;
2643 list_add(&before->list, &tmp->bo_va->invalids);
dc54d3d1
CK
2644 }
2645
2646 /* Remember mapping split at the end */
a9f87f64
CK
2647 if (tmp->last > eaddr) {
2648 after->start = eaddr + 1;
2649 after->last = tmp->last;
dc54d3d1 2650 after->offset = tmp->offset;
a9f87f64 2651 after->offset += after->start - tmp->start;
dc54d3d1 2652 after->flags = tmp->flags;
387f49e5
JZ
2653 after->bo_va = tmp->bo_va;
2654 list_add(&after->list, &tmp->bo_va->invalids);
dc54d3d1
CK
2655 }
2656
2657 list_del(&tmp->list);
2658 list_add(&tmp->list, &removed);
a9f87f64
CK
2659
2660 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
dc54d3d1
CK
2661 }
2662
2663 /* And free them up */
2664 list_for_each_entry_safe(tmp, next, &removed, list) {
a9f87f64 2665 amdgpu_vm_it_remove(tmp, &vm->va);
dc54d3d1
CK
2666 list_del(&tmp->list);
2667
a9f87f64
CK
2668 if (tmp->start < saddr)
2669 tmp->start = saddr;
2670 if (tmp->last > eaddr)
2671 tmp->last = eaddr;
dc54d3d1 2672
aebc5e6f 2673 tmp->bo_va = NULL;
dc54d3d1
CK
2674 list_add(&tmp->list, &vm->freed);
2675 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2676 }
2677
27f6d610
JZ
2678 /* Insert partial mapping before the range */
2679 if (!list_empty(&before->list)) {
a9f87f64 2680 amdgpu_vm_it_insert(before, &vm->va);
dc54d3d1
CK
2681 if (before->flags & AMDGPU_PTE_PRT)
2682 amdgpu_vm_prt_get(adev);
2683 } else {
2684 kfree(before);
2685 }
2686
2687 /* Insert partial mapping after the range */
27f6d610 2688 if (!list_empty(&after->list)) {
a9f87f64 2689 amdgpu_vm_it_insert(after, &vm->va);
dc54d3d1
CK
2690 if (after->flags & AMDGPU_PTE_PRT)
2691 amdgpu_vm_prt_get(adev);
2692 } else {
2693 kfree(after);
2694 }
2695
2696 return 0;
2697}
2698
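
A minimal standalone illustration of the splitting above: clearing a range out of the middle of one mapping leaves an optional "before" and "after" remainder (the "after" piece also inherits an adjusted offset, which this sketch omits). The page numbers are hypothetical.

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, last; };

int main(void)
{
	struct range tmp = { 0x100, 0x1ff };	/* existing mapping */
	uint64_t saddr = 0x140, eaddr = 0x17f;	/* range to clear */

	if (tmp.start < saddr)
		printf("before: 0x%llx-0x%llx\n",
		       (unsigned long long)tmp.start,
		       (unsigned long long)(saddr - 1));
	if (tmp.last > eaddr)
		printf("after:  0x%llx-0x%llx\n",
		       (unsigned long long)(eaddr + 1),
		       (unsigned long long)tmp.last);
	printf("freed:  0x%llx-0x%llx\n",
	       (unsigned long long)saddr, (unsigned long long)eaddr);
	return 0;
}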
aebc5e6f
CK
2699/**
2700 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2701 *
2702 * @vm: the requested VM
00553cf8 2703 * @addr: the address
aebc5e6f
CK
2704 *
 2705 * Find a mapping by its address.
7fc48e59
AG
2706 *
2707 * Returns:
2708 * The amdgpu_bo_va_mapping matching for addr or NULL
2709 *
aebc5e6f
CK
2710 */
2711struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2712 uint64_t addr)
2713{
2714 return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2715}
2716
8ab19ea6
CK
2717/**
2718 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2719 *
2720 * @vm: the requested vm
2721 * @ticket: CS ticket
2722 *
2723 * Trace all mappings of BOs reserved during a command submission.
2724 */
2725void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2726{
2727 struct amdgpu_bo_va_mapping *mapping;
2728
2729 if (!trace_amdgpu_vm_bo_cs_enabled())
2730 return;
2731
2732 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2733 mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2734 if (mapping->bo_va && mapping->bo_va->base.bo) {
2735 struct amdgpu_bo *bo;
2736
2737 bo = mapping->bo_va->base.bo;
2738 if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
2739 continue;
2740 }
2741
2742 trace_amdgpu_vm_bo_cs(mapping);
2743 }
2744}
2745
d38ceaf9
AD
2746/**
 2747 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2748 *
2749 * @adev: amdgpu_device pointer
2750 * @bo_va: requested bo_va
2751 *
8843dbbb 2752 * Remove @bo_va->bo from the requested vm.
d38ceaf9
AD
2753 *
2754 * Object have to be reserved!
2755 */
2756void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2757 struct amdgpu_bo_va *bo_va)
2758{
2759 struct amdgpu_bo_va_mapping *mapping, *next;
fbbf794c 2760 struct amdgpu_bo *bo = bo_va->base.bo;
ec681545 2761 struct amdgpu_vm *vm = bo_va->base.vm;
d38ceaf9 2762
fbbf794c
CK
2763 if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv)
2764 vm->bulk_moveable = false;
2765
ec681545 2766 list_del(&bo_va->base.bo_list);
d38ceaf9 2767
c12a2ee5 2768 spin_lock(&vm->invalidated_lock);
ec681545 2769 list_del(&bo_va->base.vm_status);
c12a2ee5 2770 spin_unlock(&vm->invalidated_lock);
d38ceaf9 2771
7fc11959 2772 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
d38ceaf9 2773 list_del(&mapping->list);
a9f87f64 2774 amdgpu_vm_it_remove(mapping, &vm->va);
aebc5e6f 2775 mapping->bo_va = NULL;
93e3e438 2776 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
7fc11959
CK
2777 list_add(&mapping->list, &vm->freed);
2778 }
2779 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2780 list_del(&mapping->list);
a9f87f64 2781 amdgpu_vm_it_remove(mapping, &vm->va);
284710fa
CK
2782 amdgpu_vm_free_mapping(adev, vm, mapping,
2783 bo_va->last_pt_update);
d38ceaf9 2784 }
32b41ac2 2785
f54d1867 2786 dma_fence_put(bo_va->last_pt_update);
d38ceaf9 2787 kfree(bo_va);
d38ceaf9
AD
2788}
2789
2790/**
2791 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2792 *
2793 * @adev: amdgpu_device pointer
d38ceaf9 2794 * @bo: amdgpu buffer object
00553cf8 2795 * @evicted: is the BO evicted
d38ceaf9 2796 *
8843dbbb 2797 * Mark @bo as invalid.
d38ceaf9
AD
2798 */
2799void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
3f3333f8 2800 struct amdgpu_bo *bo, bool evicted)
d38ceaf9 2801{
ec681545
CK
2802 struct amdgpu_vm_bo_base *bo_base;
2803
4bebccee
CZ
2804 /* shadow bo doesn't have bo base, its validation needs its parent */
2805 if (bo->parent && bo->parent->shadow == bo)
2806 bo = bo->parent;
2807
ec681545 2808 list_for_each_entry(bo_base, &bo->va, bo_list) {
3f3333f8
CK
2809 struct amdgpu_vm *vm = bo_base->vm;
2810
3f3333f8 2811 if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
bcdc9fd6 2812 amdgpu_vm_bo_evicted(bo_base);
3f3333f8
CK
2813 continue;
2814 }
2815
bcdc9fd6 2816 if (bo_base->moved)
3f3333f8 2817 continue;
bcdc9fd6 2818 bo_base->moved = true;
3f3333f8 2819
bcdc9fd6
CK
2820 if (bo->tbo.type == ttm_bo_type_kernel)
2821 amdgpu_vm_bo_relocated(bo_base);
2822 else if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
2823 amdgpu_vm_bo_moved(bo_base);
2824 else
2825 amdgpu_vm_bo_invalidated(bo_base);
d38ceaf9
AD
2826 }
2827}
2828
7fc48e59
AG
2829/**
2830 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2831 *
2832 * @vm_size: VM size
2833 *
2834 * Returns:
2835 * VM page table as power of two
2836 */
bab4fee7
JZ
2837static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2838{
2839 /* Total bits covered by PD + PTs */
2840 unsigned bits = ilog2(vm_size) + 18;
2841
2842 /* Make sure the PD is 4K in size up to 8GB address space.
 2843	   Above that, split equally between PD and PTs */
2844 if (vm_size <= 8)
2845 return (bits - 9);
2846 else
2847 return ((bits + 3) / 2);
2848}
2849
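
A worked example of the heuristic above for a few VM sizes, as a standalone sketch:

#include <stdio.h>

static unsigned ilog2_(unsigned x) { return 31 - __builtin_clz(x); }

static unsigned block_size(unsigned vm_size_gb)
{
	/* total bits covered by PD + PTs */
	unsigned bits = ilog2_(vm_size_gb) + 18;

	return vm_size_gb <= 8 ? bits - 9 : (bits + 3) / 2;
}

int main(void)
{
	unsigned sizes[] = { 4, 8, 64, 256 };

	for (unsigned i = 0; i < 4; i++)
		printf("%3u GB -> block size %u bits\n",
		       sizes[i], block_size(sizes[i]));
	return 0;
}

This prints 11 and 12 bits for the small sizes (4K PD) and 13/14 bits for 64 GB and 256 GB, where the bits are split roughly equally between PD and PTs.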
d07f14be
RH
2850/**
2851 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
bab4fee7
JZ
2852 *
2853 * @adev: amdgpu_device pointer
43370c4c 2854 * @min_vm_size: the minimum vm size in GB if it's set auto
00553cf8
AG
2855 * @fragment_size_default: Default PTE fragment size
2856 * @max_level: max VMPT level
2857 * @max_bits: max address space size in bits
2858 *
bab4fee7 2859 */
43370c4c 2860void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
f3368128
CK
2861 uint32_t fragment_size_default, unsigned max_level,
2862 unsigned max_bits)
bab4fee7 2863{
43370c4c
FK
2864 unsigned int max_size = 1 << (max_bits - 30);
2865 unsigned int vm_size;
36539dce
CK
2866 uint64_t tmp;
2867
2868 /* adjust vm size first */
f3368128 2869 if (amdgpu_vm_size != -1) {
fdd5faaa 2870 vm_size = amdgpu_vm_size;
f3368128
CK
2871 if (vm_size > max_size) {
2872 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2873 amdgpu_vm_size, max_size);
2874 vm_size = max_size;
2875 }
43370c4c
FK
2876 } else {
2877 struct sysinfo si;
2878 unsigned int phys_ram_gb;
2879
2880 /* Optimal VM size depends on the amount of physical
2881 * RAM available. Underlying requirements and
2882 * assumptions:
2883 *
2884 * - Need to map system memory and VRAM from all GPUs
2885 * - VRAM from other GPUs not known here
2886 * - Assume VRAM <= system memory
2887 * - On GFX8 and older, VM space can be segmented for
2888 * different MTYPEs
2889 * - Need to allow room for fragmentation, guard pages etc.
2890 *
2891 * This adds up to a rough guess of system memory x3.
2892 * Round up to power of two to maximize the available
2893 * VM size with the given page table size.
2894 */
2895 si_meminfo(&si);
2896 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2897 (1 << 30) - 1) >> 30;
2898 vm_size = roundup_pow_of_two(
2899 min(max(phys_ram_gb * 3, min_vm_size), max_size));
f3368128 2900 }
fdd5faaa
CK
2901
2902 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
36539dce
CK
2903
2904 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
97489129
CK
2905 if (amdgpu_vm_block_size != -1)
2906 tmp >>= amdgpu_vm_block_size - 9;
36539dce
CK
2907 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2908 adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
196f7489
CZ
2909 switch (adev->vm_manager.num_level) {
2910 case 3:
2911 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2912 break;
2913 case 2:
2914 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2915 break;
2916 case 1:
2917 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2918 break;
2919 default:
2920 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2921 }
b38f41eb 2922	/* block size depends on vm size and hw setup */
97489129 2923 if (amdgpu_vm_block_size != -1)
bab4fee7 2924 adev->vm_manager.block_size =
97489129
CK
2925 min((unsigned)amdgpu_vm_block_size, max_bits
2926 - AMDGPU_GPU_PAGE_SHIFT
2927 - 9 * adev->vm_manager.num_level);
2928 else if (adev->vm_manager.num_level > 1)
2929 adev->vm_manager.block_size = 9;
bab4fee7 2930 else
97489129 2931 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
bab4fee7 2932
b38f41eb
CK
2933 if (amdgpu_vm_fragment_size == -1)
2934 adev->vm_manager.fragment_size = fragment_size_default;
2935 else
2936 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
d07f14be 2937
36539dce
CK
2938 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2939 vm_size, adev->vm_manager.num_level + 1,
2940 adev->vm_manager.block_size,
fdd5faaa 2941 adev->vm_manager.fragment_size);
bab4fee7
JZ
2942}
2943
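
A standalone sketch of the automatic sizing above for a hypothetical system with 16 GB of RAM and a 48-bit address space; roughly 3x system memory, rounded up to a power of two and clamped to the address space limit. The numbers are illustrative only.

#include <stdio.h>
#include <stdint.h>

static uint64_t roundup_pow_of_two(uint64_t x)
{
	uint64_t r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned phys_ram_gb = 16;		/* hypothetical system RAM */
	unsigned min_vm_size = 8;
	unsigned max_size = 1 << (48 - 30);	/* 48-bit address space, in GB */
	uint64_t want, vm_size;

	want = phys_ram_gb * 3 > min_vm_size ? phys_ram_gb * 3 : min_vm_size;
	vm_size = roundup_pow_of_two(want < max_size ? want : max_size);

	printf("vm size %llu GB, max_pfn 0x%llx\n",
	       (unsigned long long)vm_size,
	       (unsigned long long)(vm_size << 18));
	return 0;
}

For 16 GB of RAM this yields a 64 GB VM size and a max_pfn of 0x1000000 (64 GB of 4 KiB GPU pages).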
240cd9a6
OZ
2944static struct amdgpu_retryfault_hashtable *init_fault_hash(void)
2945{
2946 struct amdgpu_retryfault_hashtable *fault_hash;
2947
2948 fault_hash = kmalloc(sizeof(*fault_hash), GFP_KERNEL);
2949 if (!fault_hash)
2950 return fault_hash;
2951
2952 INIT_CHASH_TABLE(fault_hash->hash,
2953 AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
2954 spin_lock_init(&fault_hash->lock);
2955 fault_hash->count = 0;
2956
2957 return fault_hash;
2958}
2959
d38ceaf9
AD
2960/**
2961 * amdgpu_vm_init - initialize a vm instance
2962 *
2963 * @adev: amdgpu_device pointer
2964 * @vm: requested vm
9a4b7d4c 2965 * @vm_context: Indicates whether this is a GFX or Compute context
00553cf8 2966 * @pasid: Process address space identifier
d38ceaf9 2967 *
8843dbbb 2968 * Init @vm fields.
7fc48e59
AG
2969 *
2970 * Returns:
2971 * 0 for success, error for failure.
d38ceaf9 2972 */
9a4b7d4c 2973int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
02208441 2974 int vm_context, unsigned int pasid)
d38ceaf9 2975{
3216c6b7 2976 struct amdgpu_bo_param bp;
3f4299be 2977 struct amdgpu_bo *root;
36bbf3bf 2978 int r, i;
d38ceaf9 2979
f808c13f 2980 vm->va = RB_ROOT_CACHED;
36bbf3bf
CZ
2981 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2982 vm->reserved_vmid[i] = NULL;
3f3333f8 2983 INIT_LIST_HEAD(&vm->evicted);
ea09729c 2984 INIT_LIST_HEAD(&vm->relocated);
27c7b9ae 2985 INIT_LIST_HEAD(&vm->moved);
806f043f 2986 INIT_LIST_HEAD(&vm->idle);
c12a2ee5
CK
2987 INIT_LIST_HEAD(&vm->invalidated);
2988 spin_lock_init(&vm->invalidated_lock);
d38ceaf9 2989 INIT_LIST_HEAD(&vm->freed);
20250215 2990
2bd9ccfa 2991 /* create scheduler entity for page table updates */
3798e9a6
CK
2992 r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
2993 adev->vm_manager.vm_pte_num_rqs, NULL);
2bd9ccfa 2994 if (r)
f566ceb1 2995 return r;
2bd9ccfa 2996
51ac7eec
YZ
2997 vm->pte_support_ats = false;
2998
2999 if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
9a4b7d4c
HK
3000 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
3001 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
51ac7eec 3002
13307f7e 3003 if (adev->asic_type == CHIP_RAVEN)
51ac7eec 3004 vm->pte_support_ats = true;
13307f7e 3005 } else {
9a4b7d4c
HK
3006 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
3007 AMDGPU_VM_USE_CPU_FOR_GFX);
13307f7e 3008 }
9a4b7d4c
HK
3009 DRM_DEBUG_DRIVER("VM update mode is %s\n",
3010 vm->use_cpu_for_update ? "CPU" : "SDMA");
c8c5e569 3011 WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
9a4b7d4c 3012 "CPU update of VM recommended only for large BAR system\n");
d5884513 3013 vm->last_update = NULL;
05906dec 3014
e21eb261 3015 amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
03e9dee1
FK
3016 if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
3017 bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
3f4299be 3018 r = amdgpu_bo_create(adev, &bp, &root);
d38ceaf9 3019 if (r)
2bd9ccfa
CK
3020 goto error_free_sched_entity;
3021
3f4299be 3022 r = amdgpu_bo_reserve(root, true);
d3aab672
CK
3023 if (r)
3024 goto error_free_root;
3025
3f4299be 3026 r = amdgpu_vm_clear_bo(adev, vm, root,
4584312d
CK
3027 adev->vm_manager.root_level,
3028 vm->pte_support_ats);
13307f7e
CK
3029 if (r)
3030 goto error_unreserve;
3031
3f4299be 3032 amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
d3aab672 3033 amdgpu_bo_unreserve(vm->root.base.bo);
d38ceaf9 3034
02208441
FK
3035 if (pasid) {
3036 unsigned long flags;
3037
3038 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3039 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
3040 GFP_ATOMIC);
3041 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3042 if (r < 0)
3043 goto error_free_root;
3044
3045 vm->pasid = pasid;
0a096fb6
CK
3046 }
3047
240cd9a6
OZ
3048 vm->fault_hash = init_fault_hash();
3049 if (!vm->fault_hash) {
3050 r = -ENOMEM;
3051 goto error_free_root;
3052 }
3053
a2f14820 3054 INIT_KFIFO(vm->faults);
c98171cc 3055 vm->fault_credit = 16;
d38ceaf9
AD
3056
3057 return 0;
2bd9ccfa 3058
13307f7e
CK
3059error_unreserve:
3060 amdgpu_bo_unreserve(vm->root.base.bo);
3061
67003a15 3062error_free_root:
3f3333f8
CK
3063 amdgpu_bo_unref(&vm->root.base.bo->shadow);
3064 amdgpu_bo_unref(&vm->root.base.bo);
3065 vm->root.base.bo = NULL;
2bd9ccfa
CK
3066
3067error_free_sched_entity:
cdc50176 3068 drm_sched_entity_destroy(&vm->entity);
2bd9ccfa
CK
3069
3070 return r;
d38ceaf9
AD
3071}
3072
b236fa1d
FK
3073/**
3074 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
3075 *
7fc48e59
AG
3076 * @adev: amdgpu_device pointer
3077 * @vm: requested vm
3078 *
b236fa1d
FK
3079 * This only works on GFX VMs that don't have any BOs added and no
3080 * page tables allocated yet.
3081 *
3082 * Changes the following VM parameters:
3083 * - use_cpu_for_update
3084 * - pte_supports_ats
3085 * - pasid (old PASID is released, because compute manages its own PASIDs)
3086 *
3087 * Reinitializes the page directory to reflect the changed ATS
b5d21aac 3088 * setting.
b236fa1d 3089 *
7fc48e59
AG
3090 * Returns:
3091 * 0 for success, -errno for errors.
b236fa1d 3092 */
1685b01a 3093int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
b236fa1d
FK
3094{
3095 bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
3096 int r;
3097
3098 r = amdgpu_bo_reserve(vm->root.base.bo, true);
3099 if (r)
3100 return r;
3101
3102 /* Sanity checks */
3103 if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
3104 r = -EINVAL;
1685b01a
OZ
3105 goto unreserve_bo;
3106 }
3107
3108 if (pasid) {
3109 unsigned long flags;
3110
3111 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3112 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
3113 GFP_ATOMIC);
3114 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3115
3116 if (r == -ENOSPC)
3117 goto unreserve_bo;
3118 r = 0;
b236fa1d
FK
3119 }
3120
3121 /* Check if PD needs to be reinitialized and do it before
3122 * changing any other state, in case it fails.
3123 */
3124 if (pte_support_ats != vm->pte_support_ats) {
3125 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
3126 adev->vm_manager.root_level,
3127 pte_support_ats);
3128 if (r)
1685b01a 3129 goto free_idr;
b236fa1d
FK
3130 }
3131
3132 /* Update VM state */
3133 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
3134 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
3135 vm->pte_support_ats = pte_support_ats;
3136 DRM_DEBUG_DRIVER("VM update mode is %s\n",
3137 vm->use_cpu_for_update ? "CPU" : "SDMA");
c8c5e569 3138 WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
b236fa1d
FK
3139 "CPU update of VM recommended only for large BAR system\n");
3140
3141 if (vm->pasid) {
3142 unsigned long flags;
3143
3144 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3145 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3146 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3147
1685b01a
OZ
3148 /* Free the original amdgpu allocated pasid
3149 * Will be replaced with kfd allocated pasid
3150 */
3151 amdgpu_pasid_free(vm->pasid);
b236fa1d
FK
3152 vm->pasid = 0;
3153 }
3154
b5d21aac
SL
3155 /* Free the shadow bo for compute VM */
3156 amdgpu_bo_unref(&vm->root.base.bo->shadow);
3157
1685b01a
OZ
3158 if (pasid)
3159 vm->pasid = pasid;
3160
3161 goto unreserve_bo;
3162
3163free_idr:
3164 if (pasid) {
3165 unsigned long flags;
3166
3167 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3168 idr_remove(&adev->vm_manager.pasid_idr, pasid);
3169 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3170 }
3171unreserve_bo:
b236fa1d
FK
3172 amdgpu_bo_unreserve(vm->root.base.bo);
3173 return r;
3174}
3175
bf47afba
OZ
3176/**
3177 * amdgpu_vm_release_compute - release a compute vm
3178 * @adev: amdgpu_device pointer
3179 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
3180 *
 3181 * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute
 3182 * pasid from the vm. Compute should stop using the vm after this call.
3183 */
3184void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
3185{
3186 if (vm->pasid) {
3187 unsigned long flags;
3188
3189 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3190 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3191 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3192 }
3193 vm->pasid = 0;
3194}
3195
f566ceb1
CK
3196/**
3197 * amdgpu_vm_free_levels - free PD/PT levels
3198 *
8f19cd78
CK
3199 * @adev: amdgpu device structure
3200 * @parent: PD/PT starting level to free
3201 * @level: level of parent structure
f566ceb1
CK
3202 *
3203 * Free the page directory or page table level and all sub levels.
3204 */
8f19cd78
CK
3205static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
3206 struct amdgpu_vm_pt *parent,
3207 unsigned level)
f566ceb1 3208{
8f19cd78 3209 unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
f566ceb1 3210
8f19cd78
CK
3211 if (parent->base.bo) {
3212 list_del(&parent->base.bo_list);
3213 list_del(&parent->base.vm_status);
3214 amdgpu_bo_unref(&parent->base.bo->shadow);
3215 amdgpu_bo_unref(&parent->base.bo);
f566ceb1
CK
3216 }
3217
8f19cd78
CK
3218 if (parent->entries)
3219 for (i = 0; i < num_entries; i++)
3220 amdgpu_vm_free_levels(adev, &parent->entries[i],
3221 level + 1);
f566ceb1 3222
8f19cd78 3223 kvfree(parent->entries);
f566ceb1
CK
3224}
3225
d38ceaf9
AD
3226/**
3227 * amdgpu_vm_fini - tear down a vm instance
3228 *
3229 * @adev: amdgpu_device pointer
3230 * @vm: requested vm
3231 *
8843dbbb 3232 * Tear down @vm.
d38ceaf9
AD
3233 * Unbind the VM and remove all bos from the vm bo list
3234 */
3235void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
3236{
3237 struct amdgpu_bo_va_mapping *mapping, *tmp;
132f34e4 3238 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2642cf11 3239 struct amdgpu_bo *root;
a2f14820 3240 u64 fault;
2642cf11 3241 int i, r;
d38ceaf9 3242
ede0dd86
FK
3243 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
3244
a2f14820
FK
3245 /* Clear pending page faults from IH when the VM is destroyed */
3246 while (kfifo_get(&vm->faults, &fault))
240cd9a6 3247 amdgpu_vm_clear_fault(vm->fault_hash, fault);
a2f14820 3248
02208441
FK
3249 if (vm->pasid) {
3250 unsigned long flags;
3251
3252 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3253 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
3254 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3255 }
3256
240cd9a6
OZ
3257 kfree(vm->fault_hash);
3258 vm->fault_hash = NULL;
3259
cdc50176 3260 drm_sched_entity_destroy(&vm->entity);
2bd9ccfa 3261
f808c13f 3262 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
d38ceaf9
AD
3263 dev_err(adev->dev, "still active bo inside vm\n");
3264 }
f808c13f
DB
3265 rbtree_postorder_for_each_entry_safe(mapping, tmp,
3266 &vm->va.rb_root, rb) {
d38ceaf9 3267 list_del(&mapping->list);
a9f87f64 3268 amdgpu_vm_it_remove(mapping, &vm->va);
d38ceaf9
AD
3269 kfree(mapping);
3270 }
3271 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
4388fc2a 3272 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
451bc8eb 3273 amdgpu_vm_prt_fini(adev, vm);
4388fc2a 3274 prt_fini_needed = false;
451bc8eb 3275 }
284710fa 3276
d38ceaf9 3277 list_del(&mapping->list);
451bc8eb 3278 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
d38ceaf9
AD
3279 }
3280
2642cf11
CK
3281 root = amdgpu_bo_ref(vm->root.base.bo);
3282 r = amdgpu_bo_reserve(root, true);
3283 if (r) {
3284 dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
3285 } else {
196f7489
CZ
3286 amdgpu_vm_free_levels(adev, &vm->root,
3287 adev->vm_manager.root_level);
2642cf11
CK
3288 amdgpu_bo_unreserve(root);
3289 }
3290 amdgpu_bo_unref(&root);
d5884513 3291 dma_fence_put(vm->last_update);
1e9ef26f 3292 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
620f774f 3293 amdgpu_vmid_free_reserved(adev, vm, i);
d38ceaf9 3294}
ea89f8c9 3295
c98171cc
FK
3296/**
3297 * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
3298 *
3299 * @adev: amdgpu_device pointer
3300 * @pasid: PASID do identify the VM
3301 *
7fc48e59
AG
3302 * This function is expected to be called in interrupt context.
3303 *
3304 * Returns:
3305 * True if there was fault credit, false otherwise
c98171cc
FK
3306 */
3307bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
3308 unsigned int pasid)
3309{
3310 struct amdgpu_vm *vm;
3311
3312 spin_lock(&adev->vm_manager.pasid_lock);
3313 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
d958939a 3314 if (!vm) {
c98171cc 3315 /* VM not found, can't track fault credit */
d958939a 3316 spin_unlock(&adev->vm_manager.pasid_lock);
c98171cc 3317 return true;
d958939a 3318 }
c98171cc
FK
3319
3320 /* No lock needed. only accessed by IRQ handler */
d958939a 3321 if (!vm->fault_credit) {
c98171cc 3322 /* Too many faults in this VM */
d958939a 3323 spin_unlock(&adev->vm_manager.pasid_lock);
c98171cc 3324 return false;
d958939a 3325 }
c98171cc
FK
3326
3327 vm->fault_credit--;
d958939a 3328 spin_unlock(&adev->vm_manager.pasid_lock);
c98171cc
FK
3329 return true;
3330}
3331
a9a78b32
CK
3332/**
3333 * amdgpu_vm_manager_init - init the VM manager
3334 *
3335 * @adev: amdgpu_device pointer
3336 *
3337 * Initialize the VM manager structures
3338 */
3339void amdgpu_vm_manager_init(struct amdgpu_device *adev)
3340{
620f774f 3341 unsigned i;
a9a78b32 3342
620f774f 3343 amdgpu_vmid_mgr_init(adev);
2d55e45a 3344
f54d1867
CW
3345 adev->vm_manager.fence_context =
3346 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
1fbb2e92
CK
3347 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
3348 adev->vm_manager.seqno[i] = 0;
3349
284710fa 3350 spin_lock_init(&adev->vm_manager.prt_lock);
451bc8eb 3351 atomic_set(&adev->vm_manager.num_prt_users, 0);
9a4b7d4c
HK
3352
3353 /* If not overridden by the user, by default, only in large BAR systems
3354 * Compute VM tables will be updated by CPU
3355 */
3356#ifdef CONFIG_X86_64
3357 if (amdgpu_vm_update_mode == -1) {
c8c5e569 3358 if (amdgpu_gmc_vram_full_visible(&adev->gmc))
9a4b7d4c
HK
3359 adev->vm_manager.vm_update_mode =
3360 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
3361 else
3362 adev->vm_manager.vm_update_mode = 0;
3363 } else
3364 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
3365#else
3366 adev->vm_manager.vm_update_mode = 0;
3367#endif
3368
02208441
FK
3369 idr_init(&adev->vm_manager.pasid_idr);
3370 spin_lock_init(&adev->vm_manager.pasid_lock);
a9a78b32
CK
3371}
3372
ea89f8c9
CK
3373/**
3374 * amdgpu_vm_manager_fini - cleanup VM manager
3375 *
3376 * @adev: amdgpu_device pointer
3377 *
3378 * Cleanup the VM manager and free resources.
3379 */
3380void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
3381{
02208441
FK
3382 WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
3383 idr_destroy(&adev->vm_manager.pasid_idr);
3384
620f774f 3385 amdgpu_vmid_mgr_fini(adev);
ea89f8c9 3386}
cfbcacf4 3387
7fc48e59
AG
3388/**
3389 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
3390 *
3391 * @dev: drm device pointer
3392 * @data: drm_amdgpu_vm
3393 * @filp: drm file pointer
3394 *
3395 * Returns:
3396 * 0 for success, -errno for errors.
3397 */
cfbcacf4
CZ
3398int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
3399{
3400 union drm_amdgpu_vm *args = data;
1e9ef26f
CZ
3401 struct amdgpu_device *adev = dev->dev_private;
3402 struct amdgpu_fpriv *fpriv = filp->driver_priv;
3403 int r;
cfbcacf4
CZ
3404
3405 switch (args->in.op) {
3406 case AMDGPU_VM_OP_RESERVE_VMID:
1e9ef26f 3407		/* currently, we only need to reserve a vmid from the gfxhub */
620f774f 3408 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
1e9ef26f
CZ
3409 if (r)
3410 return r;
3411 break;
cfbcacf4 3412 case AMDGPU_VM_OP_UNRESERVE_VMID:
620f774f 3413 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
cfbcacf4
CZ
3414 break;
3415 default:
3416 return -EINVAL;
3417 }
3418
3419 return 0;
3420}
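
From userspace this handler is reached through DRM_IOCTL_AMDGPU_VM. A hedged usage sketch follows; the render node path is an assumption, the uapi header location may differ between distributions, and error handling is minimal.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/amdgpu_drm.h>	/* may live under libdrm/ on some systems */

int main(void)
{
	union drm_amdgpu_vm args = { .in = { .op = AMDGPU_VM_OP_RESERVE_VMID } };
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_VM, &args))
		perror("reserve vmid");

	args.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
	if (ioctl(fd, DRM_IOCTL_AMDGPU_VM, &args))
		perror("unreserve vmid");

	close(fd);
	return 0;
}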
2aa37bf5
AG
3421
3422/**
3423 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
3424 *
989edc69 3425 * @adev: drm device pointer
2aa37bf5
AG
3426 * @pasid: PASID identifier for VM
3427 * @task_info: task_info to fill.
3428 */
3429void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
3430 struct amdgpu_task_info *task_info)
3431{
3432 struct amdgpu_vm *vm;
3433
3434 spin_lock(&adev->vm_manager.pasid_lock);
3435
3436 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3437 if (vm)
3438 *task_info = vm->task_info;
3439
3440 spin_unlock(&adev->vm_manager.pasid_lock);
3441}
3442
3443/**
3444 * amdgpu_vm_set_task_info - Sets VMs task info.
3445 *
3446 * @vm: vm for which to set the info
3447 */
3448void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3449{
3450 if (!vm->task_info.pid) {
3451 vm->task_info.pid = current->pid;
3452 get_task_comm(vm->task_info.task_name, current);
3453
3454 if (current->group_leader->mm == current->mm) {
3455 vm->task_info.tgid = current->group_leader->pid;
3456 get_task_comm(vm->task_info.process_name, current->group_leader);
3457 }
3458 }
3459}
240cd9a6
OZ
3460
3461/**
3462 * amdgpu_vm_add_fault - Add a page fault record to fault hash table
3463 *
3464 * @fault_hash: fault hash table
3465 * @key: 64-bit encoding of PASID and address
3466 *
3467 * This should be called when a retry page fault interrupt is
3468 * received. If this is a new page fault, it will be added to a hash
3469 * table. The return value indicates whether this is a new fault, or
3470 * a fault that was already known and is already being handled.
3471 *
3472 * If there are too many pending page faults, this will fail. Retry
3473 * interrupts should be ignored in this case until there is enough
3474 * free space.
3475 *
3476 * Returns 0 if the fault was added, 1 if the fault was already known,
3477 * -ENOSPC if there are too many pending faults.
3478 */
3479int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
3480{
3481 unsigned long flags;
3482 int r = -ENOSPC;
3483
3484 if (WARN_ON_ONCE(!fault_hash))
3485 /* Should be allocated in amdgpu_vm_init
3486 */
3487 return r;
3488
3489 spin_lock_irqsave(&fault_hash->lock, flags);
3490
3491 /* Only let the hash table fill up to 50% for best performance */
3492 if (fault_hash->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
3493 goto unlock_out;
3494
3495 r = chash_table_copy_in(&fault_hash->hash, key, NULL);
3496 if (!r)
3497 fault_hash->count++;
3498
3499 /* chash_table_copy_in should never fail unless we're losing count */
3500 WARN_ON_ONCE(r < 0);
3501
3502unlock_out:
3503 spin_unlock_irqrestore(&fault_hash->lock, flags);
3504 return r;
3505}
3506
3507/**
3508 * amdgpu_vm_clear_fault - Remove a page fault record
3509 *
3510 * @fault_hash: fault hash table
3511 * @key: 64-bit encoding of PASID and address
3512 *
3513 * This should be called when a page fault has been handled. Any
3514 * future interrupt with this key will be processed as a new
3515 * page fault.
3516 */
3517void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
3518{
3519 unsigned long flags;
3520 int r;
3521
3522 if (!fault_hash)
3523 return;
3524
3525 spin_lock_irqsave(&fault_hash->lock, flags);
3526
3527 r = chash_table_remove(&fault_hash->hash, key, NULL);
3528 if (!WARN_ON_ONCE(r < 0)) {
3529 fault_hash->count--;
3530 WARN_ON_ONCE(fault_hash->count < 0);
3531 }
3532
3533 spin_unlock_irqrestore(&fault_hash->lock, flags);
3534}
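
Together, amdgpu_vm_add_fault and amdgpu_vm_clear_fault give retry interrupts at-most-once handling per fault key. A toy standalone model of those semantics follows; it uses a flat array instead of the driver's closed hash table, a much smaller capacity, and a made-up key, so it only illustrates the add/already-known/clear flow.

#include <stdio.h>
#include <stdint.h>

#define TABLE_SIZE	8			/* tiny stand-in for the real hash */
#define MAX_PENDING	(TABLE_SIZE / 2)	/* fill at most 50% */

static uint64_t pending[TABLE_SIZE];
static int count;

/* 0 = new fault added, 1 = already pending, -1 = too many pending faults */
static int add_fault(uint64_t key)
{
	int i;

	for (i = 0; i < count; i++)
		if (pending[i] == key)
			return 1;
	if (count >= MAX_PENDING)
		return -1;
	pending[count++] = key;
	return 0;
}

static void clear_fault(uint64_t key)
{
	int i;

	for (i = 0; i < count; i++)
		if (pending[i] == key) {
			pending[i] = pending[--count];
			return;
		}
}

int main(void)
{
	uint64_t key = 0xdeadbeef;	/* stands in for the PASID/address encoding */

	printf("first interrupt: %d\n", add_fault(key));	/* 0: handle it */
	printf("retry interrupt: %d\n", add_fault(key));	/* 1: already known */
	clear_fault(key);					/* fault handled */
	printf("later interrupt: %d\n", add_fault(key));	/* 0: new fault */
	return 0;
}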