/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is similar to the legacy GART on older ASICs, however
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

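/*
 * Illustrative sketch (not part of the driver): INTERVAL_TREE_DEFINE()
 * above generates the static helpers amdgpu_vm_it_iter_first() and
 * amdgpu_vm_it_iter_next(), which later code uses to find all mappings
 * overlapping an address range. A minimal walk over [start, last] could
 * look like the following; the function name and the debug print are
 * assumptions made for this example only.
 */
#if 0
static void amdgpu_vm_it_example(struct amdgpu_vm *vm,
				 uint64_t start, uint64_t last)
{
	struct amdgpu_bo_va_mapping *mapping;

	for (mapping = amdgpu_vm_it_iter_first(&vm->va, start, last);
	     mapping;
	     mapping = amdgpu_vm_it_iter_next(mapping, start, last))
		DRM_DEBUG("overlap [%llx, %llx]\n", mapping->start,
			  mapping->last);
}
#endif
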
/**
 * struct amdgpu_pte_update_params - Local structure
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_pte_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @src: address where to copy page table entries from
	 */
	uint64_t src;

	/**
	 * @ib: indirect buffer to fill with commands
	 */
	struct amdgpu_ib *ib;

	/**
	 * @func: Function which actually does the update
	 */
	void (*func)(struct amdgpu_pte_update_params *params,
		     struct amdgpu_bo *bo, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping, used during VM update by CPU
	 */
	dma_addr_t *pages_addr;
};

/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = 0xff;

	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		shift = 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
		break;
	case AMDGPU_VM_PTB:
		shift = 0;
		break;
	default:
		dev_err(adev->dev, "the level%d isn't supported.\n", level);
	}

	return shift;
}

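/*
 * Worked example (illustrative, assuming the common four-level Vega
 * setup with root_level == AMDGPU_VM_PDB2 and block_size == 9): the
 * shifts come out as PDB2: 9 * 2 + 9 = 27, PDB1: 9 * 1 + 9 = 18,
 * PDB0: 9 and PTB: 0, i.e. each level consumes 9 bits of the pfn.
 */
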
/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = amdgpu_vm_level_shift(adev,
					       adev->vm_manager.root_level);

	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;
	else
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
 *
 * @adev: amdgpu_device pointer
 *
 * Returns:
 * The number of entries in the root page directory which needs the ATS setting.
 */
static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
{
	unsigned shift;

	shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
	return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
}

/**
 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The mask to extract the entry number of a PD/PT from an address.
 */
static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
				       unsigned int level)
{
	if (level <= adev->vm_manager.root_level)
		return 0xffffffff;
	else if (level != AMDGPU_VM_PTB)
		return 0x1ff;
	else
		return AMDGPU_VM_PTE_COUNT(adev) - 1;
}

/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}

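/*
 * Worked example (illustrative): an interior PD has 512 entries of
 * 8 bytes each, so amdgpu_vm_bo_size() returns the GPU-page aligned
 * 512 * 8 = 4096 bytes for it; the root PD and the leaf PTs scale with
 * max_pfn and AMDGPU_VM_PTE_COUNT() respectively.
 */
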
/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
	struct amdgpu_vm *vm = vm_bo->vm;
	struct amdgpu_bo *bo = vm_bo->bo;

	vm_bo->moved = true;
	if (bo->tbo.type == ttm_bo_type_kernel)
		list_move(&vm_bo->vm_status, &vm->evicted);
	else
		list_move_tail(&vm_bo->vm_status, &vm->evicted);
}

/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}

/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
}

/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
	vm_bo->moved = false;
}

/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated and that change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}

/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which are invalidated and that change has been updated
 * in the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_del_init(&vm_bo->vm_status);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}

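/*
 * Overview (summary of the helpers above): PDs/PTs and per-VM BOs move
 * through evicted -> relocated/moved -> idle, while normal BOs go
 * invalidated -> done. Each helper simply moves the entry onto the
 * per-VM list that the corresponding processing step consumes next.
 */
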
/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists
 *
 */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo *bo)
{
	base->vm = vm;
	base->bo = bo;
	base->next = NULL;
	INIT_LIST_HEAD(&base->vm_status);

	if (!bo)
		return;
	base->next = bo->vm_bo;
	bo->vm_bo = base;

	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
		return;

	vm->bulk_moveable = false;
	if (bo->tbo.type == ttm_bo_type_kernel)
		amdgpu_vm_bo_relocated(base);
	else
		amdgpu_vm_bo_idle(base);

	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
		return;

	/*
	 * We checked all the prerequisites, but it looks like this per-VM BO
	 * is currently evicted. Add the BO to the evicted list to make sure
	 * it is validated on next VM use to avoid a fault.
	 */
	amdgpu_vm_bo_evicted(base);
}

/**
 * amdgpu_vm_pt_parent - get the parent page directory
 *
 * @pt: child page table
 *
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
{
	struct amdgpu_bo *parent = pt->base.bo->parent;

	if (!parent)
		return NULL;

	return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
}

/**
 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 */
struct amdgpu_vm_pt_cursor {
	uint64_t pfn;
	struct amdgpu_vm_pt *parent;
	struct amdgpu_vm_pt *entry;
	unsigned level;
};

/**
 * amdgpu_vm_pt_start - start PD/PT walk
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start address of the walk
 * @cursor: state to initialize
 *
 * Initialize an amdgpu_vm_pt_cursor to start a walk.
 */
static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, uint64_t start,
			       struct amdgpu_vm_pt_cursor *cursor)
{
	cursor->pfn = start;
	cursor->parent = NULL;
	cursor->entry = &vm->root;
	cursor->level = adev->vm_manager.root_level;
}

/**
 * amdgpu_vm_pt_descendant - go to child node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the child node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
				    struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned mask, shift, idx;

	if (!cursor->entry->entries)
		return false;

	BUG_ON(!cursor->entry->base.bo);
	mask = amdgpu_vm_entries_mask(adev, cursor->level);
	shift = amdgpu_vm_level_shift(adev, cursor->level);

	++cursor->level;
	idx = (cursor->pfn >> shift) & mask;
	cursor->parent = cursor->entry;
	cursor->entry = &cursor->entry->entries[idx];
	return true;
}

/**
 * amdgpu_vm_pt_sibling - go to sibling node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the sibling node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
				 struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned shift, num_entries;

	/* Root doesn't have a sibling */
	if (!cursor->parent)
		return false;

	/* Go to our parents and see if we got a sibling */
	shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
	num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);

	if (cursor->entry == &cursor->parent->entries[num_entries - 1])
		return false;

	cursor->pfn += 1ULL << shift;
	cursor->pfn &= ~((1ULL << shift) - 1);
	++cursor->entry;
	return true;
}

/**
 * amdgpu_vm_pt_ancestor - go to parent node
 *
 * @cursor: current state
 *
 * Walk to the parent node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->parent)
		return false;

	--cursor->level;
	cursor->entry = cursor->parent;
	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
	return true;
}

/**
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next node.
 */
static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
			      struct amdgpu_vm_pt_cursor *cursor)
{
	/* First try a newborn child */
	if (amdgpu_vm_pt_descendant(adev, cursor))
		return;

	/* If that didn't work try to find a sibling */
	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
		/* No sibling, go to our parents and grandparents */
		if (!amdgpu_vm_pt_ancestor(cursor)) {
			cursor->pfn = ~0ll;
			return;
		}
	}
}

/**
 * amdgpu_vm_pt_first_leaf - get first leaf PD/PT
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start addr of the walk
 * @cursor: state to initialize
 *
 * Start a walk and go directly to the leaf node.
 */
static void amdgpu_vm_pt_first_leaf(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm, uint64_t start,
				    struct amdgpu_vm_pt_cursor *cursor)
{
	amdgpu_vm_pt_start(adev, vm, start, cursor);
	while (amdgpu_vm_pt_descendant(adev, cursor));
}

/**
 * amdgpu_vm_pt_next_leaf - get next leaf PD/PT
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next leaf node.
 */
static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	amdgpu_vm_pt_next(adev, cursor);
	if (cursor->pfn != ~0ll)
		while (amdgpu_vm_pt_descendant(adev, cursor));
}

/**
 * for_each_amdgpu_vm_pt_leaf - walk over all leaf PDs/PTs in the hierarchy
 */
#define for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor)		\
	for (amdgpu_vm_pt_first_leaf((adev), (vm), (start), &(cursor));	\
	     (cursor).pfn <= end; amdgpu_vm_pt_next_leaf((adev), &(cursor)))

/**
 * amdgpu_vm_pt_first_dfs - start a depth-first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @cursor: state to initialize
 *
 * Starts a depth-first traversal of the PD/PT tree.
 */
static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	amdgpu_vm_pt_start(adev, vm, 0, cursor);
	while (amdgpu_vm_pt_descendant(adev, cursor));
}

/**
 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Move the cursor to the next node in a depth-first search.
 */
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->entry)
		return;

	if (!cursor->parent)
		cursor->entry = NULL;
	else if (amdgpu_vm_pt_sibling(adev, cursor))
		while (amdgpu_vm_pt_descendant(adev, cursor));
	else
		amdgpu_vm_pt_ancestor(cursor);
}

/**
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)		\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), &(cursor)),			\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     (entry); (entry) = (cursor).entry,					\
	     amdgpu_vm_pt_next_dfs((adev), &(cursor)))

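/*
 * Illustrative sketch (not part of the driver): walking all leaf page
 * tables covering a pfn range with the cursor machinery above. The
 * function name is an assumption for this example only.
 */
#if 0
static void amdgpu_vm_pt_walk_example(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      uint64_t start, uint64_t end)
{
	struct amdgpu_vm_pt_cursor cursor;

	for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor) {
		/* cursor.entry is the leaf PD/PT covering cursor.pfn */
	}
}
#endif
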
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->priority = 0;
	entry->tv.bo = &vm->root.base.bo->tbo;
	/* One for the VM updates, one for TTM and one for the CS job */
	entry->tv.num_shared = 3;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

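/*
 * Illustrative sketch (not part of the driver): typical use during
 * command submission. The function and variable names are assumptions
 * made for this example only.
 */
#if 0
void example_cs_prepare(struct amdgpu_vm *vm)
{
	struct amdgpu_bo_list_entry pd;
	struct list_head validated;

	INIT_LIST_HEAD(&validated);
	amdgpu_vm_get_pd_bo(vm, &validated, &pd);
	/* then reserve "validated" and call amdgpu_vm_validate_pt_bos() */
}
#endif
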
/**
 * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
 *
 * @bo: BO which was removed from the LRU
 *
 * Make sure the bulk_moveable flag of the affected VMs is cleared when a BO
 * leaves the LRU.
 */
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_bo *abo;
	struct amdgpu_vm_bo_base *bo_base;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
		return;

	abo = ttm_to_amdgpu_bo(bo);
	if (!abo->parent)
		return;
	for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;

		if (abo->tbo.resv == vm->root.base.bo->tbo.resv)
			vm->bulk_moveable = false;
	}
}

/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	struct amdgpu_vm_bo_base *bo_base;

	if (vm->bulk_moveable) {
		spin_lock(&glob->lru_lock);
		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
		spin_unlock(&glob->lru_lock);
		return;
	}

	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo_base, &vm->idle, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;

		if (!bo->parent)
			continue;

		ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
		if (bo->shadow)
			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
						&vm->lru_bulk_move);
	}
	spin_unlock(&glob->lru_lock);

	vm->bulk_moveable = true;
}

/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 *
 * Returns:
 * Validation result.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	struct amdgpu_vm_bo_base *bo_base, *tmp;
	int r = 0;

	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;

		r = validate(param, bo);
		if (r)
			break;

		if (bo->tbo.type != ttm_bo_type_kernel) {
			amdgpu_vm_bo_moved(bo_base);
		} else {
			if (vm->use_cpu_for_update)
				r = amdgpu_bo_kmap(bo, NULL);
			else
				r = amdgpu_ttm_alloc_gart(&bo->tbo);
			if (r)
				break;
			if (bo->shadow) {
				r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
				if (r)
					break;
			}
			amdgpu_vm_bo_relocated(bo_base);
		}
	}

	return r;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if eviction list is empty.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	return list_empty(&vm->evicted);
}

/**
 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @bo: BO to clear
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	unsigned level = adev->vm_manager.root_level;
	struct amdgpu_bo *ancestor = bo;
	struct dma_fence *fence = NULL;
	unsigned entries, ats_entries;
	struct amdgpu_ring *ring;
	struct amdgpu_job *job;
	uint64_t addr;
	int r;

	/* Figure out our place in the hierarchy */
	if (ancestor->parent) {
		++level;
		while (ancestor->parent->parent) {
			++level;
			ancestor = ancestor->parent;
		}
	}

	entries = amdgpu_bo_size(bo) / 8;
	if (!vm->pte_support_ats) {
		ats_entries = 0;

	} else if (!bo->parent) {
		ats_entries = amdgpu_vm_num_ats_entries(adev);
		ats_entries = min(ats_entries, entries);
		entries -= ats_entries;

	} else {
		struct amdgpu_vm_pt *pt;

		pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
		ats_entries = amdgpu_vm_num_ats_entries(adev);
		if ((pt - vm->root.entries) >= ats_entries) {
			ats_entries = 0;
		} else {
			ats_entries = entries;
			entries = 0;
		}
	}

	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		return r;

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r)
		return r;

	if (bo->shadow) {
		r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
				    &ctx);
		if (r)
			return r;

		r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
		if (r)
			return r;
	}

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		return r;

	do {
		addr = amdgpu_bo_gpu_offset(bo);
		if (ats_entries) {
			uint64_t ats_value;

			ats_value = AMDGPU_PTE_DEFAULT_ATC;
			if (level != AMDGPU_VM_PTB)
				ats_value |= AMDGPU_PDE_PTE;

			amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
					      ats_entries, 0, ats_value);
			addr += ats_entries * 8;
		}

		if (entries) {
			uint64_t value = 0;

			/* Workaround for fault priority problem on GMC9 */
			if (level == AMDGPU_VM_PTB &&
			    adev->asic_type >= CHIP_VEGA10)
				value = AMDGPU_PTE_EXECUTABLE;

			amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
					      entries, 0, value);
		}

		bo = bo->shadow;
	} while (bo);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);

	WARN_ON(job->ibs[0].length_dw > 64);
	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
			     AMDGPU_FENCE_OWNER_KFD, false);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
			      &fence);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->root.base.bo, fence, true);
	dma_fence_put(fence);

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: the page directory level
 * @bp: resulting BO allocation parameters
 */
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			       int level, struct amdgpu_bo_param *bp)
{
	memset(bp, 0, sizeof(*bp));

	bp->size = amdgpu_vm_bo_size(adev, level);
	bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
	bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
	bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	if (vm->use_cpu_for_update)
		bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else if (!vm->root.base.bo || vm->root.base.bo->shadow)
		bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
	bp->type = ttm_bo_type_kernel;
	if (vm->root.base.bo)
		bp->resv = vm->root.base.bo->tbo.resv;
}

/**
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page directories and page tables are allocated
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_bo *pt;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	eaddr = saddr + size - 1;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	if (eaddr >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
			eaddr, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	for_each_amdgpu_vm_pt_leaf(adev, vm, saddr, eaddr, cursor) {
		struct amdgpu_vm_pt *entry = cursor.entry;
		struct amdgpu_bo_param bp;

		if (cursor.level < AMDGPU_VM_PTB) {
			unsigned num_entries;

			num_entries = amdgpu_vm_num_entries(adev, cursor.level);
			entry->entries = kvmalloc_array(num_entries,
							sizeof(*entry->entries),
							GFP_KERNEL |
							__GFP_ZERO);
			if (!entry->entries)
				return -ENOMEM;
		}

		if (entry->base.bo)
			continue;

		amdgpu_vm_bo_param(adev, vm, cursor.level, &bp);

		r = amdgpu_bo_create(adev, &bp, &pt);
		if (r)
			return r;

		if (vm->use_cpu_for_update) {
			r = amdgpu_bo_kmap(pt, NULL);
			if (r)
				goto error_free_pt;
		}

		/* Keep a reference to the root directory to avoid
		 * freeing them up in the wrong order.
		 */
		pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);

		amdgpu_vm_bo_base_init(&entry->base, vm, pt);

		r = amdgpu_vm_clear_bo(adev, vm, pt);
		if (r)
			goto error_free_pt;
	}

	return 0;

error_free_pt:
	amdgpu_bo_unref(&pt->shadow);
	amdgpu_bo_unref(&pt);
	return r;
}

/**
 * amdgpu_vm_free_pts - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_pt *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {

		if (entry->base.bo) {
			entry->base.bo->vm_bo = NULL;
			list_del(&entry->base.vm_status);
			amdgpu_bo_unref(&entry->base.bo->shadow);
			amdgpu_bo_unref(&entry->base.bo);
		}
		kvfree(entry->entries);
	}

	BUG_ON(vm->root.base.bo);
}

/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
	const struct amdgpu_ip_block *ip_block;
	bool has_compute_vm_bug;
	struct amdgpu_ring *ring;
	int i;

	has_compute_vm_bug = false;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
		 */
		if (ip_block->version->major <= 7)
			has_compute_vm_bug = true;
		else if (ip_block->version->major == 8)
			if (adev->gfx.mec_fw_version < 673)
				has_compute_vm_bug = true;
	}

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			/* only compute rings */
			ring->has_compute_vm_bug = has_compute_vm_bug;
		else
			ring->has_compute_vm_bug = false;
	}
}

/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id;
	bool gds_switch_needed;
	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;

	if (job->vmid == 0)
		return false;
	id = &id_mgr->ids[job->vmid];
	gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);

	if (amdgpu_vmid_had_gpu_reset(adev, id))
		return true;

	return vm_flush_needed || gds_switch_needed;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	bool vm_flush_needed = job->vm_needs_flush;
	bool pasid_mapping_needed = id->pasid != job->pasid ||
		!id->pasid_mapping ||
		!dma_fence_is_signaled(id->pasid_mapping);
	struct dma_fence *fence = NULL;
	unsigned patch_offset = 0;
	int r;

	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
		pasid_mapping_needed = true;
	}

	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
	vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
		job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
		ring->funcs->emit_wreg;

	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
		return 0;

	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

	if (vm_flush_needed) {
		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
	}

	if (pasid_mapping_needed)
		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

	if (vm_flush_needed || pasid_mapping_needed) {
		r = amdgpu_fence_emit(ring, &fence, 0);
		if (r)
			return r;
	}

	if (vm_flush_needed) {
		mutex_lock(&id_mgr->lock);
		dma_fence_put(id->last_flush);
		id->last_flush = dma_fence_get(fence);
		id->current_gpu_reset_count =
			atomic_read(&adev->gpu_reset_counter);
		mutex_unlock(&id_mgr->lock);
	}

	if (pasid_mapping_needed) {
		id->pasid = job->pasid;
		dma_fence_put(id->pasid_mapping);
		id->pasid_mapping = dma_fence_get(fence);
	}
	dma_fence_put(fence);

	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
	}
	return 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 *
 * Returns:
 * Found bo_va or NULL.
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *base;

	for (base = bo->vm_bo; base; base = base->next) {
		if (base->vm != vm)
			continue;

		return container_of(base, struct amdgpu_bo_va, base);
	}
	return NULL;
}

/**
 * amdgpu_vm_do_set_ptes - helper to call the right asic function
 *
 * @params: see amdgpu_pte_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
				  struct amdgpu_bo *bo,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
				  uint64_t flags)
{
	pe += amdgpu_bo_gpu_offset(bo);
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

	if (count < 3) {
		amdgpu_vm_write_pte(params->adev, params->ib, pe,
				    addr | flags, count, incr);

	} else {
		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 *
 * @params: see amdgpu_pte_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
				   struct amdgpu_bo *bo,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	uint64_t src = (params->src + (addr >> 12) * 8);

	pe += amdgpu_bo_gpu_offset(bo);
	trace_amdgpu_vm_copy_ptes(pe, src, count);

	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The pointer for the page table entry.
 */
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}

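/*
 * Worked example (illustrative, assuming 64KiB CPU pages): for
 * addr == 0x12345678, pages_addr[0x1234] supplies the DMA address of
 * the CPU page, bits 15..0 (0x5678) are OR'ed back in as the offset
 * inside that page, and the final mask clears bits 11..0 again so the
 * result stays aligned to the 4KiB GPU page granularity (0x...5000).
 * With 4KiB CPU pages the OR and the mask cancel out exactly.
 */
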
/**
 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
 *
 * @params: see amdgpu_pte_update_params definition
 * @bo: PD/PT to update
 * @pe: kmap addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Write count number of PT/PD entries directly.
 */
static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
				   struct amdgpu_bo *bo,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	unsigned int i;
	uint64_t value;

	pe += (unsigned long)amdgpu_bo_kptr(bo);

	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

	for (i = 0; i < count; i++) {
		value = params->pages_addr ?
			amdgpu_vm_map_gart(params->pages_addr, addr) :
			addr;
		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
				       i, value, flags);
		addr += incr;
	}
}

/**
 * amdgpu_vm_update_func - helper to call update function
 *
 * Calls the update function for both the given BO as well as its shadow.
 */
static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params,
				  struct amdgpu_bo *bo,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
				  uint64_t flags)
{
	if (bo->shadow)
		params->func(params, bo->shadow, pe, addr, count, incr, flags);
	params->func(params, bo, pe, addr, count, incr, flags);
}

/*
 * amdgpu_vm_update_pde - update a single level in the hierarchy
 *
 * @param: parameters for the update
 * @vm: requested vm
 * @parent: parent directory
 * @entry: entry to update
 *
 * Makes sure the requested entry in parent is up to date.
 */
static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
				 struct amdgpu_vm *vm,
				 struct amdgpu_vm_pt *parent,
				 struct amdgpu_vm_pt *entry)
{
	struct amdgpu_bo *bo = parent->base.bo, *pbo;
	uint64_t pde, pt, flags;
	unsigned level;

	/* Don't update huge pages here */
	if (entry->huge)
		return;

	for (level = 0, pbo = bo->parent; pbo; ++level)
		pbo = pbo->parent;

	level += params->adev->vm_manager.root_level;
	amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
	pde = (entry - parent->entries) * 8;
	amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
}

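/*
 * Worked example (illustrative): for the sixth entry of a directory
 * (entry - parent->entries == 5) the PDE sits at byte offset
 * 5 * 8 == 40 inside the parent's BO; amdgpu_vm_update_func() then
 * writes that single entry to both the PD and its shadow.
 */
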
/*
 * amdgpu_vm_invalidate_pds - mark all PDs as invalid
 *
 * @adev: amdgpu_device pointer
 * @vm: related vm
 *
 * Mark all PD levels as invalid after an error.
 */
static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
				     struct amdgpu_vm *vm)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_pt *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)
		if (entry->base.bo && !entry->base.moved)
			amdgpu_vm_bo_relocated(&entry->base);
}

194d2161 CK |
1437 | /* |
1438 | * amdgpu_vm_update_directories - make sure that all directories are valid | |
1439 | * | |
1440 | * @adev: amdgpu_device pointer | |
1441 | * @vm: requested vm | |
1442 | * | |
1443 | * Makes sure all directories are up to date. | |
7fc48e59 AG |
1444 | * |
1445 | * Returns: | |
1446 | * 0 for success, error for failure. | |
194d2161 CK |
1447 | */ |
1448 | int amdgpu_vm_update_directories(struct amdgpu_device *adev, | |
1449 | struct amdgpu_vm *vm) | |
1450 | { | |
6989f246 CK |
1451 | struct amdgpu_pte_update_params params; |
1452 | struct amdgpu_job *job; | |
1453 | unsigned ndw = 0; | |
78aa02c7 | 1454 | int r = 0; |
92456b93 | 1455 | |
6989f246 CK |
1456 | if (list_empty(&vm->relocated)) |
1457 | return 0; | |
1458 | ||
1459 | restart: | |
1460 | memset(¶ms, 0, sizeof(params)); | |
1461 | params.adev = adev; | |
1462 | ||
1463 | if (vm->use_cpu_for_update) { | |
e8e32426 FK |
1464 | r = amdgpu_bo_sync_wait(vm->root.base.bo, |
1465 | AMDGPU_FENCE_OWNER_VM, true); | |
6989f246 CK |
1466 | if (unlikely(r)) |
1467 | return r; | |
1468 | ||
1469 | params.func = amdgpu_vm_cpu_set_ptes; | |
1470 | } else { | |
1471 | ndw = 512 * 8; | |
1472 | r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); | |
1473 | if (r) | |
1474 | return r; | |
1475 | ||
1476 | params.ib = &job->ibs[0]; | |
1477 | params.func = amdgpu_vm_do_set_ptes; | |
1478 | } | |
1479 | ||
ea09729c | 1480 | while (!list_empty(&vm->relocated)) { |
6989f246 | 1481 | struct amdgpu_vm_pt *pt, *entry; |
ea09729c | 1482 | |
ba79fde4 CK |
1483 | entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt, |
1484 | base.vm_status); | |
1485 | amdgpu_vm_bo_idle(&entry->base); | |
ea09729c | 1486 | |
ba79fde4 CK |
1487 | pt = amdgpu_vm_pt_parent(entry); |
1488 | if (!pt) | |
6989f246 | 1489 | continue; |
6989f246 | 1490 | |
6989f246 CK |
1491 | amdgpu_vm_update_pde(¶ms, vm, pt, entry); |
1492 | ||
6989f246 CK |
1493 | if (!vm->use_cpu_for_update && |
1494 | (ndw - params.ib->length_dw) < 32) | |
1495 | break; | |
ea09729c | 1496 | } |
92456b93 | 1497 | |
68c62306 CK |
1498 | if (vm->use_cpu_for_update) { |
1499 | /* Flush HDP */ | |
1500 | mb(); | |
69882565 | 1501 | amdgpu_asic_flush_hdp(adev, NULL); |
6989f246 CK |
1502 | } else if (params.ib->length_dw == 0) { |
1503 | amdgpu_job_free(job); | |
1504 | } else { | |
1505 | struct amdgpu_bo *root = vm->root.base.bo; | |
1506 | struct amdgpu_ring *ring; | |
1507 | struct dma_fence *fence; | |
1508 | ||
068c3304 | 1509 | ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, |
6989f246 CK |
1510 | sched); |
1511 | ||
1512 | amdgpu_ring_pad_ib(ring, params.ib); | |
1513 | amdgpu_sync_resv(adev, &job->sync, root->tbo.resv, | |
1514 | AMDGPU_FENCE_OWNER_VM, false); | |
6989f246 | 1515 | WARN_ON(params.ib->length_dw > ndw); |
0e28b10f CK |
1516 | r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, |
1517 | &fence); | |
6989f246 CK |
1518 | if (r) |
1519 | goto error; | |
1520 | ||
1521 | amdgpu_bo_fence(root, fence, true); | |
1522 | dma_fence_put(vm->last_update); | |
1523 | vm->last_update = fence; | |
68c62306 CK |
1524 | } |
1525 | ||
6989f246 CK |
1526 | if (!list_empty(&vm->relocated)) |
1527 | goto restart; | |
1528 | ||
1529 | return 0; | |
1530 | ||
1531 | error: | |
d4085ea9 | 1532 | amdgpu_vm_invalidate_pds(adev, vm); |
6989f246 | 1533 | amdgpu_job_free(job); |
92456b93 | 1534 | return r; |
194d2161 CK |
1535 | } |
1536 | ||
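/*
 * Sizing sketch for the SDMA path above, with assumed numbers: the job
 * reserves ndw = 512 * 8 = 4096 dwords, the loop bails out once fewer
 * than 32 dwords remain in the IB, and the "restart:" label then
 * submits the partial job and allocates a fresh one for whatever is
 * still left on the relocated list.
 */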
cf2f0a37 | 1537 | /** |
e95b93ce | 1538 | * amdgpu_vm_update_flags - figure out flags for PTE updates |
cf2f0a37 | 1539 | * |
dfcd99f6 | 1540 | * Make sure to set the right flags for the PTEs at the desired level. |
cf2f0a37 | 1541 | */ |
e95b93ce CK |
1542 | static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params, |
1543 | struct amdgpu_bo *bo, unsigned level, | |
1544 | uint64_t pe, uint64_t addr, | |
1545 | unsigned count, uint32_t incr, | |
1546 | uint64_t flags) | |
cf2f0a37 | 1547 | |
dfcd99f6 CK |
1548 | { |
1549 | if (level != AMDGPU_VM_PTB) { | |
cf2f0a37 | 1550 | flags |= AMDGPU_PDE_PTE; |
dfcd99f6 | 1551 | amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags); |
e95b93ce CK |
1552 | |
1553 | } else if (params->adev->asic_type >= CHIP_VEGA10 && | |
1554 | !(flags & AMDGPU_PTE_VALID) && | |
1555 | !(flags & AMDGPU_PTE_PRT)) { | |
1556 | ||
1557 | /* Workaround for fault priority problem on GMC9 */ | |
1558 | flags |= AMDGPU_PTE_EXECUTABLE; | |
cf2f0a37 AD |
1559 | } |
1560 | ||
dfcd99f6 CK |
1561 | amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags); |
1562 | } | |
1563 | ||
1564 | /** | |
1565 | * amdgpu_vm_fragment - get fragment for PTEs | |
1566 | * | |
1567 | * @params: see amdgpu_pte_update_params definition | |
1568 | * @start: first PTE to handle | |
1569 | * @end: last PTE to handle | |
1570 | * @flags: hw mapping flags | |
1571 | * @frag: resulting fragment size | |
1572 | * @frag_end: end of this fragment | |
1573 | * | |
1574 | * Returns the first possible fragment for the start and end address. | |
1575 | */ | |
1576 | static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params, | |
1577 | uint64_t start, uint64_t end, uint64_t flags, | |
1578 | unsigned int *frag, uint64_t *frag_end) | |
1579 | { | |
1580 | /*
1581 | * The MC L1 TLB supports variable sized pages, based on a fragment | |
1582 | * field in the PTE. When this field is set to a non-zero value, page | |
1583 | * granularity is increased from 4KB to (1 << (12 + frag)). The PTE | |
1584 | * flags are considered valid for all PTEs within the fragment range | |
1585 | * and corresponding mappings are assumed to be physically contiguous. | |
1586 | * | |
1587 | * The L1 TLB can store a single PTE for the whole fragment, | |
1588 | * significantly increasing the space available for translation | |
1589 | * caching. This leads to large improvements in throughput when the | |
1590 | * TLB is under pressure. | |
1591 | * | |
1592 | * The L2 TLB distributes small and large fragments into two | |
1593 | * asymmetric partitions. The large fragment cache is significantly | |
1594 | * larger. Thus, we try to use large fragments wherever possible. | |
1595 | * Userspace can support this by aligning virtual base address and | |
1596 | * allocation size to the fragment size. | |
1b1d5c43 CK |
1597 | * |
1598 | * Starting with Vega10 the fragment size only controls the L1. The L2 | |
1599 | * is now directly fed with small/huge/giant pages from the walker.
dfcd99f6 | 1600 | */ |
1b1d5c43 CK |
1601 | unsigned max_frag; |
1602 | ||
1603 | if (params->adev->asic_type < CHIP_VEGA10) | |
1604 | max_frag = params->adev->vm_manager.fragment_size; | |
1605 | else | |
1606 | max_frag = 31; | |
dfcd99f6 CK |
1607 | |
1608 | /* system pages are not physically contiguous */
0c70dd49 | 1609 | if (params->src) { |
dfcd99f6 CK |
1610 | *frag = 0; |
1611 | *frag_end = end; | |
ec5207c9 | 1612 | return; |
3cc1d3ea | 1613 | } |
cf2f0a37 | 1614 | |
dfcd99f6 CK |
1615 | /* This intentionally wraps around if no bit is set */ |
1616 | *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1); | |
1617 | if (*frag >= max_frag) { | |
1618 | *frag = max_frag; | |
1619 | *frag_end = end & ~((1ULL << max_frag) - 1); | |
1620 | } else { | |
1621 | *frag_end = start + (1 << *frag); | |
1622 | } | |
4e2cb640 CK |
1623 | } |
1624 | ||
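/*
 * Worked example of the selection above; start = 0x400 and end = 0x1000
 * are hypothetical GPU page numbers, not taken from a real mapping:
 */
#if 0
	frag = min(ffs(0x400) - 1, fls64(0x1000 - 0x400) - 1); /* min(10, 11) */
	/* 10 < max_frag (31 on Vega10 and newer), so: */
	frag_end = 0x400 + (1 << 10);	/* 0x800 */
	/* those PTEs get AMDGPU_PTE_FRAG(10): page granularity grows to
	 * 1 << (12 + 10) bytes, i.e. one 4MB L1 TLB entry.
	 */
#endif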
d38ceaf9 AD |
1625 | /** |
1626 | * amdgpu_vm_update_ptes - make sure that page tables are valid | |
1627 | * | |
29efc4f5 | 1628 | * @params: see amdgpu_pte_update_params definition |
d38ceaf9 AD |
1629 | * @start: start of GPU address range |
1630 | * @end: end of GPU address range | |
677131a1 | 1631 | * @dst: destination address to map to; advanced to the next dst inside the function
d38ceaf9 AD |
1632 | * @flags: mapping flags |
1633 | * | |
8843dbbb | 1634 | * Update the page tables in the range @start - @end. |
7fc48e59 AG |
1635 | * |
1636 | * Returns: | |
1637 | * 0 for success, -EINVAL for failure. | |
d38ceaf9 | 1638 | */ |
cc28c4ed | 1639 | static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, |
dfcd99f6 CK |
1640 | uint64_t start, uint64_t end, |
1641 | uint64_t dst, uint64_t flags) | |
d38ceaf9 | 1642 | { |
36b32a68 | 1643 | struct amdgpu_device *adev = params->adev; |
dfa70550 | 1644 | struct amdgpu_vm_pt_cursor cursor; |
dfcd99f6 CK |
1645 | uint64_t frag_start = start, frag_end; |
1646 | unsigned int frag; | |
1647 | ||
1648 | /* figure out the initial fragment */ | |
1649 | amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end); | |
d38ceaf9 | 1650 | |
dfcd99f6 CK |
1651 | /* walk over the address space and update the PTs */ |
1652 | amdgpu_vm_pt_start(adev, params->vm, start, &cursor); | |
1653 | while (cursor.pfn < end) { | |
dfa70550 | 1654 | struct amdgpu_bo *pt = cursor.entry->base.bo; |
cb90b97b | 1655 | unsigned shift, parent_shift, mask; |
dfcd99f6 | 1656 | uint64_t incr, entry_end, pe_start; |
cf2f0a37 | 1657 | |
dfcd99f6 | 1658 | if (!pt) |
cf2f0a37 | 1659 | return -ENOENT; |
4e2cb640 | 1660 | |
dfcd99f6 CK |
1661 | /* The root level can't be a huge page */ |
1662 | if (cursor.level == adev->vm_manager.root_level) { | |
1663 | if (!amdgpu_vm_pt_descendant(adev, &cursor)) | |
1664 | return -ENOENT; | |
cf2f0a37 | 1665 | continue; |
dfa70550 | 1666 | } |
cf2f0a37 | 1667 | |
dfcd99f6 CK |
1668 | /* If the entry was handled as a huge page before, it isn't any more */
1669 | if (cursor.entry->huge) { | |
1670 | /* Add the entry to the relocated list to update it. */ | |
1671 | cursor.entry->huge = false; | |
1672 | amdgpu_vm_bo_relocated(&cursor.entry->base); | |
1673 | } | |
92696dd5 | 1674 | |
dfcd99f6 CK |
1675 | shift = amdgpu_vm_level_shift(adev, cursor.level); |
1676 | parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1); | |
1677 | if (adev->asic_type < CHIP_VEGA10) { | |
1678 | /* No huge page support before GMC v9 */ | |
1679 | if (cursor.level != AMDGPU_VM_PTB) { | |
1680 | if (!amdgpu_vm_pt_descendant(adev, &cursor)) | |
1681 | return -ENOENT; | |
1682 | continue; | |
1683 | } | |
1684 | } else if (frag < shift) { | |
1685 | /* We can't use this level when the fragment size is | |
1686 | * smaller than the address shift. Go to the next | |
1687 | * child entry and try again. | |
1688 | */ | |
1689 | if (!amdgpu_vm_pt_descendant(adev, &cursor)) | |
1690 | return -ENOENT; | |
1691 | continue; | |
1954db15 FK |
1692 | } else if (frag >= parent_shift && |
1693 | cursor.level - 1 != adev->vm_manager.root_level) { | |
dfcd99f6 | 1694 | /* If the fragment size is even larger than the parent |
1954db15 FK |
1695 | * shift we should go up one level and check it again |
1696 | * unless one level up is the root level. | |
dfcd99f6 CK |
1697 | */ |
1698 | if (!amdgpu_vm_pt_ancestor(&cursor)) | |
1699 | return -ENOENT; | |
1700 | continue; | |
6849d47c RH |
1701 | } |
1702 | ||
dfcd99f6 | 1703 | /* Looks good so far, calculate parameters for the update */ |
9ce2b991 | 1704 | incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift; |
cb90b97b CK |
1705 | mask = amdgpu_vm_entries_mask(adev, cursor.level); |
1706 | pe_start = ((cursor.pfn >> shift) & mask) * 8; | |
9ce2b991 | 1707 | entry_end = (uint64_t)(mask + 1) << shift; |
dfcd99f6 CK |
1708 | entry_end += cursor.pfn & ~(entry_end - 1); |
1709 | entry_end = min(entry_end, end); | |
1710 | ||
1711 | do { | |
1712 | uint64_t upd_end = min(entry_end, frag_end); | |
1713 | unsigned nptes = (upd_end - frag_start) >> shift; | |
1714 | ||
e95b93ce CK |
1715 | amdgpu_vm_update_flags(params, pt, cursor.level, |
1716 | pe_start, dst, nptes, incr, | |
1717 | flags | AMDGPU_PTE_FRAG(frag)); | |
dfcd99f6 CK |
1718 | |
1719 | pe_start += nptes * 8; | |
9ce2b991 | 1720 | dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift; |
dfcd99f6 CK |
1721 | |
1722 | frag_start = upd_end; | |
1723 | if (frag_start >= frag_end) { | |
1724 | /* figure out the next fragment */ | |
1725 | amdgpu_vm_fragment(params, frag_start, end, | |
1726 | flags, &frag, &frag_end); | |
1727 | if (frag < shift) | |
1728 | break; | |
1729 | } | |
1730 | } while (frag_start < entry_end); | |
92696dd5 | 1731 | |
c1a17777 CK |
1732 | if (amdgpu_vm_pt_descendant(adev, &cursor)) { |
1733 | /* Mark all child entries as huge */ | |
1734 | while (cursor.pfn < frag_start) { | |
1735 | cursor.entry->huge = true; | |
1736 | amdgpu_vm_pt_next(adev, &cursor); | |
1737 | } | |
1738 | ||
1739 | } else if (frag >= shift) { | |
1740 | /* or just move on to the next on the same level. */ | |
dfcd99f6 | 1741 | amdgpu_vm_pt_next(adev, &cursor); |
c1a17777 | 1742 | } |
92696dd5 | 1743 | } |
6849d47c RH |
1744 | |
1745 | return 0; | |
d38ceaf9 AD |
1746 | } |
1747 | ||
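/*
 * Walk sketch, assuming GFX9 defaults of 9 bits per level and 4K pages:
 * at cursor.level == AMDGPU_VM_PTB, shift = 0 and mask = 511, so
 * pe_start = (cursor.pfn & 511) * 8 is the byte offset of the first PTE
 * inside the page table BO, and entry_end clamps the inner loop to the
 * 512-entry boundary of that table before the cursor advances to the
 * next PT.
 */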
d38ceaf9 AD |
1748 | /** |
1749 | * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table | |
1750 | * | |
1751 | * @adev: amdgpu_device pointer | |
3cabaa54 | 1752 | * @exclusive: fence we need to sync to |
fa3ab3c7 | 1753 | * @pages_addr: DMA addresses to use for mapping |
d38ceaf9 | 1754 | * @vm: requested vm |
a14faa65 CK |
1755 | * @start: start of mapped range |
1756 | * @last: last mapped entry | |
1757 | * @flags: flags for the entries | |
d38ceaf9 | 1758 | * @addr: addr to set the area to |
d38ceaf9 AD |
1759 | * @fence: optional resulting fence |
1760 | * | |
a14faa65 | 1761 | * Fill in the page table entries between @start and @last. |
7fc48e59 AG |
1762 | * |
1763 | * Returns: | |
1764 | * 0 for success, -EINVAL for failure. | |
d38ceaf9 AD |
1765 | */ |
1766 | static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | |
f54d1867 | 1767 | struct dma_fence *exclusive, |
fa3ab3c7 | 1768 | dma_addr_t *pages_addr, |
d38ceaf9 | 1769 | struct amdgpu_vm *vm, |
a14faa65 | 1770 | uint64_t start, uint64_t last, |
6b777607 | 1771 | uint64_t flags, uint64_t addr, |
f54d1867 | 1772 | struct dma_fence **fence) |
d38ceaf9 | 1773 | { |
2d55e45a | 1774 | struct amdgpu_ring *ring; |
a1e08d3b | 1775 | void *owner = AMDGPU_FENCE_OWNER_VM; |
d38ceaf9 | 1776 | unsigned nptes, ncmds, ndw; |
d71518b5 | 1777 | struct amdgpu_job *job; |
29efc4f5 | 1778 | struct amdgpu_pte_update_params params; |
f54d1867 | 1779 | struct dma_fence *f = NULL; |
d38ceaf9 AD |
1780 | int r; |
1781 | ||
afef8b8f CK |
1782 | memset(¶ms, 0, sizeof(params)); |
1783 | params.adev = adev; | |
49ac8a24 | 1784 | params.vm = vm; |
afef8b8f | 1785 | |
8db588d5 | 1786 | /* sync to everything except eviction fences on unmapping */ |
a33cab7a | 1787 | if (!(flags & AMDGPU_PTE_VALID)) |
8db588d5 | 1788 | owner = AMDGPU_FENCE_OWNER_KFD; |
a33cab7a | 1789 | |
b4d42511 HK |
1790 | if (vm->use_cpu_for_update) { |
1791 | /* params.src is used as a flag to indicate system memory */
1792 | if (pages_addr) | |
1793 | params.src = ~0; | |
1794 | ||
90d64722 | 1795 | /* Wait for PT BOs to be idle. PTs share the same resv. object |
b4d42511 HK |
1796 | * as the root PD BO |
1797 | */ | |
e8e32426 | 1798 | r = amdgpu_bo_sync_wait(vm->root.base.bo, owner, true); |
b4d42511 HK |
1799 | if (unlikely(r)) |
1800 | return r; | |
1801 | ||
90d64722 | 1802 | /* Wait for any BO move to be completed */ |
7fbd31cc CK |
1803 | if (exclusive) { |
1804 | r = dma_fence_wait(exclusive, true); | |
1805 | if (unlikely(r)) | |
1806 | return r; | |
1807 | } | |
90d64722 | 1808 | |
b4d42511 HK |
1809 | params.func = amdgpu_vm_cpu_set_ptes; |
1810 | params.pages_addr = pages_addr; | |
dfcd99f6 CK |
1811 | return amdgpu_vm_update_ptes(¶ms, start, last + 1, |
1812 | addr, flags); | |
b4d42511 HK |
1813 | } |
1814 | ||
068c3304 | 1815 | ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched); |
27c5f36f | 1816 | |
a14faa65 | 1817 | nptes = last - start + 1; |
d38ceaf9 AD |
1818 | |
1819 | /* | |
86209523 | 1820 | * reserve space for two commands every (1 << BLOCK_SIZE) |
d38ceaf9 AD |
1821 | * entries or 2k dwords (whatever is smaller) |
1822 | */ | |
1b52f2d5 CK |
1823 | ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1); |
1824 | ||
1825 | /* The second command is for the shadow pagetables. */ | |
104bd2ca | 1826 | if (vm->root.base.bo->shadow) |
1b52f2d5 | 1827 | ncmds *= 2; |
d38ceaf9 AD |
1828 | |
1829 | /* padding, etc. */ | |
1830 | ndw = 64; | |
1831 | ||
570144c6 | 1832 | if (pages_addr) { |
b0456f93 | 1833 | /* copy commands needed */ |
e6d92197 | 1834 | ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw; |
d38ceaf9 | 1835 | |
b0456f93 | 1836 | /* and also PTEs */ |
d38ceaf9 AD |
1837 | ndw += nptes * 2; |
1838 | ||
afef8b8f CK |
1839 | params.func = amdgpu_vm_do_copy_ptes; |
1840 | ||
d38ceaf9 AD |
1841 | } else { |
1842 | /* set page commands needed */ | |
44e1baeb | 1843 | ndw += ncmds * 10; |
d38ceaf9 | 1844 | |
6849d47c | 1845 | /* extra commands for begin/end fragments */ |
1b52f2d5 | 1846 | ncmds = 2 * adev->vm_manager.fragment_size; |
11528640 | 1847 | if (vm->root.base.bo->shadow) |
1b52f2d5 CK |
1848 | ncmds *= 2; |
1849 | ||
1850 | ndw += 10 * ncmds; | |
afef8b8f CK |
1851 | |
1852 | params.func = amdgpu_vm_do_set_ptes; | |
d38ceaf9 AD |
1853 | } |
1854 | ||
d71518b5 CK |
1855 | r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); |
1856 | if (r) | |
d38ceaf9 | 1857 | return r; |
d71518b5 | 1858 | |
29efc4f5 | 1859 | params.ib = &job->ibs[0]; |
d5fc5e82 | 1860 | |
570144c6 | 1861 | if (pages_addr) { |
b0456f93 CK |
1862 | uint64_t *pte; |
1863 | unsigned i; | |
1864 | ||
1865 | /* Put the PTEs at the end of the IB. */ | |
1866 | i = ndw - nptes * 2; | |
1867 | pte = (uint64_t *)&(job->ibs->ptr[i]);
1868 | params.src = job->ibs->gpu_addr + i * 4; | |
1869 | ||
1870 | for (i = 0; i < nptes; ++i) { | |
1871 | pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i * | |
1872 | AMDGPU_GPU_PAGE_SIZE); | |
1873 | pte[i] |= flags; | |
1874 | } | |
d7a4ac66 | 1875 | addr = 0; |
b0456f93 CK |
1876 | } |
1877 | ||
cebb52b7 | 1878 | r = amdgpu_sync_fence(adev, &job->sync, exclusive, false); |
3cabaa54 CK |
1879 | if (r) |
1880 | goto error_free; | |
1881 | ||
3f3333f8 | 1882 | r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv, |
177ae09b | 1883 | owner, false); |
a1e08d3b CK |
1884 | if (r) |
1885 | goto error_free; | |
d38ceaf9 | 1886 | |
dfcd99f6 | 1887 | r = amdgpu_vm_update_ptes(¶ms, start, last + 1, addr, flags); |
cc28c4ed HK |
1888 | if (r) |
1889 | goto error_free; | |
d38ceaf9 | 1890 | |
29efc4f5 CK |
1891 | amdgpu_ring_pad_ib(ring, params.ib); |
1892 | WARN_ON(params.ib->length_dw > ndw); | |
0e28b10f | 1893 | r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f); |
4af9f07c CZ |
1894 | if (r) |
1895 | goto error_free; | |
d38ceaf9 | 1896 | |
3f3333f8 | 1897 | amdgpu_bo_fence(vm->root.base.bo, f, true); |
284710fa CK |
1898 | dma_fence_put(*fence); |
1899 | *fence = f; | |
d38ceaf9 | 1900 | return 0; |
d5fc5e82 CZ |
1901 | |
1902 | error_free: | |
d71518b5 | 1903 | amdgpu_job_free(job); |
4af9f07c | 1904 | return r; |
d38ceaf9 AD |
1905 | } |
1906 | ||
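/*
 * Budget sketch with assumed values (block_size = 9, a shadow PD,
 * nptes = 10000, set-page path): ncmds = (10000 >> 9) + 1 = 20, doubled
 * to 40 for the shadow, giving 64 + 40 * 10 dwords, plus 10 dwords for
 * each of the 2 * fragment_size begin/end fragment commands (doubled
 * again for the shadow). WARN_ON(params.ib->length_dw > ndw) later
 * checks that the filled IB really stayed inside this reservation.
 */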
a14faa65 CK |
1907 | /** |
1908 | * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks | |
1909 | * | |
1910 | * @adev: amdgpu_device pointer | |
3cabaa54 | 1911 | * @exclusive: fence we need to sync to |
8358dcee | 1912 | * @pages_addr: DMA addresses to use for mapping |
a14faa65 CK |
1913 | * @vm: requested vm |
1914 | * @mapping: mapped range and flags to use for the update | |
8358dcee | 1915 | * @flags: HW flags for the mapping |
63e0ba40 | 1916 | * @nodes: array of drm_mm_nodes with the MC addresses |
a14faa65 CK |
1917 | * @fence: optional resulting fence |
1918 | * | |
1919 | * Split the mapping into smaller chunks so that each update fits | |
1920 | * into an SDMA IB.
7fc48e59 AG |
1921 | * |
1922 | * Returns: | |
1923 | * 0 for success, -EINVAL for failure. | |
a14faa65 CK |
1924 | */ |
1925 | static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, | |
f54d1867 | 1926 | struct dma_fence *exclusive, |
8358dcee | 1927 | dma_addr_t *pages_addr, |
a14faa65 CK |
1928 | struct amdgpu_vm *vm, |
1929 | struct amdgpu_bo_va_mapping *mapping, | |
6b777607 | 1930 | uint64_t flags, |
63e0ba40 | 1931 | struct drm_mm_node *nodes, |
f54d1867 | 1932 | struct dma_fence **fence) |
a14faa65 | 1933 | { |
9fc8fc70 | 1934 | unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size; |
570144c6 | 1935 | uint64_t pfn, start = mapping->start; |
a14faa65 CK |
1936 | int r; |
1937 | ||
1938 | /* Normally bo_va->flags only contains the READABLE and WRITEABLE bits,
1939 | * but we filter the flags here just in case something else slipped in.
1940 | */ | |
1941 | if (!(mapping->flags & AMDGPU_PTE_READABLE)) | |
1942 | flags &= ~AMDGPU_PTE_READABLE; | |
1943 | if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) | |
1944 | flags &= ~AMDGPU_PTE_WRITEABLE; | |
1945 | ||
15b31c59 AX |
1946 | flags &= ~AMDGPU_PTE_EXECUTABLE; |
1947 | flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; | |
1948 | ||
b0fd18b0 AX |
1949 | flags &= ~AMDGPU_PTE_MTYPE_MASK; |
1950 | flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK); | |
1951 | ||
d0766e98 ZJ |
1952 | if ((mapping->flags & AMDGPU_PTE_PRT) && |
1953 | (adev->asic_type >= CHIP_VEGA10)) { | |
1954 | flags |= AMDGPU_PTE_PRT; | |
1955 | flags &= ~AMDGPU_PTE_VALID; | |
1956 | } | |
1957 | ||
a14faa65 CK |
1958 | trace_amdgpu_vm_bo_update(mapping); |
1959 | ||
63e0ba40 CK |
1960 | pfn = mapping->offset >> PAGE_SHIFT; |
1961 | if (nodes) { | |
1962 | while (pfn >= nodes->size) { | |
1963 | pfn -= nodes->size; | |
1964 | ++nodes; | |
1965 | } | |
fa3ab3c7 | 1966 | } |
a14faa65 | 1967 | |
63e0ba40 | 1968 | do { |
9fc8fc70 | 1969 | dma_addr_t *dma_addr = NULL; |
63e0ba40 CK |
1970 | uint64_t max_entries; |
1971 | uint64_t addr, last; | |
a14faa65 | 1972 | |
63e0ba40 CK |
1973 | if (nodes) { |
1974 | addr = nodes->start << PAGE_SHIFT; | |
1975 | max_entries = (nodes->size - pfn) * | |
463d2fe8 | 1976 | AMDGPU_GPU_PAGES_IN_CPU_PAGE; |
63e0ba40 CK |
1977 | } else { |
1978 | addr = 0; | |
1979 | max_entries = S64_MAX; | |
1980 | } | |
a14faa65 | 1981 | |
63e0ba40 | 1982 | if (pages_addr) { |
9fc8fc70 CK |
1983 | uint64_t count; |
1984 | ||
457e0fee | 1985 | max_entries = min(max_entries, 16ull * 1024ull); |
38e624a1 | 1986 | for (count = 1; |
463d2fe8 | 1987 | count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE; |
38e624a1 | 1988 | ++count) { |
9fc8fc70 CK |
1989 | uint64_t idx = pfn + count; |
1990 | ||
1991 | if (pages_addr[idx] != | |
1992 | (pages_addr[idx - 1] + PAGE_SIZE)) | |
1993 | break; | |
1994 | } | |
1995 | ||
1996 | if (count < min_linear_pages) { | |
1997 | addr = pfn << PAGE_SHIFT; | |
1998 | dma_addr = pages_addr; | |
1999 | } else { | |
2000 | addr = pages_addr[pfn]; | |
463d2fe8 | 2001 | max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE; |
9fc8fc70 CK |
2002 | } |
2003 | ||
63e0ba40 CK |
2004 | } else if (flags & AMDGPU_PTE_VALID) { |
2005 | addr += adev->vm_manager.vram_base_offset; | |
9fc8fc70 | 2006 | addr += pfn << PAGE_SHIFT; |
63e0ba40 | 2007 | } |
63e0ba40 | 2008 | |
a9f87f64 | 2009 | last = min((uint64_t)mapping->last, start + max_entries - 1); |
9fc8fc70 | 2010 | r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm, |
a14faa65 CK |
2011 | start, last, flags, addr, |
2012 | fence); | |
2013 | if (r) | |
2014 | return r; | |
2015 | ||
463d2fe8 | 2016 | pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE; |
63e0ba40 CK |
2017 | if (nodes && nodes->size == pfn) { |
2018 | pfn = 0; | |
2019 | ++nodes; | |
2020 | } | |
a14faa65 | 2021 | start = last + 1; |
63e0ba40 | 2022 | |
a9f87f64 | 2023 | } while (unlikely(start != mapping->last + 1)); |
a14faa65 CK |
2024 | |
2025 | return 0; | |
2026 | } | |
2027 | ||
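/*
 * Sketch of the contiguity scan above, assuming fragment_size = 9 and
 * therefore min_linear_pages = 512: a run where pages_addr[] increases
 * by PAGE_SIZE for at least 512 consecutive entries is mapped linearly
 * from pages_addr[pfn]; shorter runs fall back to per-page translation
 * through dma_addr, capped at 16K entries per
 * amdgpu_vm_bo_update_mapping() call.
 */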
d38ceaf9 AD |
2028 | /** |
2029 | * amdgpu_vm_bo_update - update all BO mappings in the vm page table | |
2030 | * | |
2031 | * @adev: amdgpu_device pointer | |
2032 | * @bo_va: requested BO and VM object | |
99e124f4 | 2033 | * @clear: if true clear the entries |
d38ceaf9 AD |
2034 | * |
2035 | * Fill in the page table entries for @bo_va. | |
7fc48e59 AG |
2036 | * |
2037 | * Returns: | |
2038 | * 0 for success, -EINVAL for failure. | |
d38ceaf9 AD |
2039 | */ |
2040 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | |
2041 | struct amdgpu_bo_va *bo_va, | |
99e124f4 | 2042 | bool clear) |
d38ceaf9 | 2043 | { |
ec681545 CK |
2044 | struct amdgpu_bo *bo = bo_va->base.bo; |
2045 | struct amdgpu_vm *vm = bo_va->base.vm; | |
d38ceaf9 | 2046 | struct amdgpu_bo_va_mapping *mapping; |
8358dcee | 2047 | dma_addr_t *pages_addr = NULL; |
99e124f4 | 2048 | struct ttm_mem_reg *mem; |
63e0ba40 | 2049 | struct drm_mm_node *nodes; |
4e55eb38 | 2050 | struct dma_fence *exclusive, **last_update; |
457e0fee | 2051 | uint64_t flags; |
d38ceaf9 AD |
2052 | int r; |
2053 | ||
7eb80427 | 2054 | if (clear || !bo) { |
99e124f4 | 2055 | mem = NULL; |
63e0ba40 | 2056 | nodes = NULL; |
99e124f4 CK |
2057 | exclusive = NULL; |
2058 | } else { | |
8358dcee CK |
2059 | struct ttm_dma_tt *ttm; |
2060 | ||
7eb80427 | 2061 | mem = &bo->tbo.mem; |
63e0ba40 CK |
2062 | nodes = mem->mm_node; |
2063 | if (mem->mem_type == TTM_PL_TT) { | |
7eb80427 | 2064 | ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); |
8358dcee | 2065 | pages_addr = ttm->dma_address; |
9ab21462 | 2066 | } |
ec681545 | 2067 | exclusive = reservation_object_get_excl(bo->tbo.resv); |
d38ceaf9 AD |
2068 | } |
2069 | ||
457e0fee | 2070 | if (bo) |
ec681545 | 2071 | flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); |
457e0fee | 2072 | else |
a5f6b5b1 | 2073 | flags = 0x0; |
d38ceaf9 | 2074 | |
4e55eb38 CK |
2075 | if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv)) |
2076 | last_update = &vm->last_update; | |
2077 | else | |
2078 | last_update = &bo_va->last_pt_update; | |
2079 | ||
3d7d4d3a CK |
2080 | if (!clear && bo_va->base.moved) { |
2081 | bo_va->base.moved = false; | |
7fc11959 | 2082 | list_splice_init(&bo_va->valids, &bo_va->invalids); |
3d7d4d3a | 2083 | |
cb7b6ec2 CK |
2084 | } else if (bo_va->cleared != clear) { |
2085 | list_splice_init(&bo_va->valids, &bo_va->invalids); | |
3d7d4d3a | 2086 | } |
7fc11959 CK |
2087 | |
2088 | list_for_each_entry(mapping, &bo_va->invalids, list) { | |
457e0fee | 2089 | r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, |
63e0ba40 | 2090 | mapping, flags, nodes, |
4e55eb38 | 2091 | last_update); |
d38ceaf9 AD |
2092 | if (r) |
2093 | return r; | |
2094 | } | |
2095 | ||
cb7b6ec2 CK |
2096 | if (vm->use_cpu_for_update) { |
2097 | /* Flush HDP */ | |
2098 | mb(); | |
69882565 | 2099 | amdgpu_asic_flush_hdp(adev, NULL); |
d6c10f6b CK |
2100 | } |
2101 | ||
bb475839 JZ |
2102 | /* If the BO is not in its preferred location add it back to |
2103 | * the evicted list so that it gets validated again on the | |
2104 | * next command submission. | |
2105 | */ | |
806f043f CK |
2106 | if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) { |
2107 | uint32_t mem_type = bo->tbo.mem.mem_type; | |
2108 | ||
2109 | if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) | |
bcdc9fd6 | 2110 | amdgpu_vm_bo_evicted(&bo_va->base); |
806f043f | 2111 | else |
bcdc9fd6 | 2112 | amdgpu_vm_bo_idle(&bo_va->base); |
c12a2ee5 | 2113 | } else { |
bcdc9fd6 | 2114 | amdgpu_vm_bo_done(&bo_va->base); |
806f043f | 2115 | } |
d38ceaf9 | 2116 | |
cb7b6ec2 CK |
2117 | list_splice_init(&bo_va->invalids, &bo_va->valids); |
2118 | bo_va->cleared = clear; | |
2119 | ||
2120 | if (trace_amdgpu_vm_bo_mapping_enabled()) { | |
2121 | list_for_each_entry(mapping, &bo_va->valids, list) | |
2122 | trace_amdgpu_vm_bo_mapping(mapping); | |
68c62306 CK |
2123 | } |
2124 | ||
d38ceaf9 AD |
2125 | return 0; |
2126 | } | |
2127 | ||
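/*
 * Resulting list transitions, in short: a per-VM BO (sharing the root
 * reservation) outside its preferred domains goes back to the evicted
 * list so the next submission revalidates it, one inside them becomes
 * idle, and an independently reserved BO moves to the done list.
 */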
284710fa CK |
2128 | /** |
2129 | * amdgpu_vm_update_prt_state - update the global PRT state | |
7fc48e59 AG |
2130 | * |
2131 | * @adev: amdgpu_device pointer | |
284710fa CK |
2132 | */ |
2133 | static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev) | |
2134 | { | |
2135 | unsigned long flags; | |
2136 | bool enable; | |
2137 | ||
2138 | spin_lock_irqsave(&adev->vm_manager.prt_lock, flags); | |
451bc8eb | 2139 | enable = !!atomic_read(&adev->vm_manager.num_prt_users); |
132f34e4 | 2140 | adev->gmc.gmc_funcs->set_prt(adev, enable); |
284710fa CK |
2141 | spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags); |
2142 | } | |
2143 | ||
451bc8eb | 2144 | /** |
4388fc2a | 2145 | * amdgpu_vm_prt_get - add a PRT user |
7fc48e59 AG |
2146 | * |
2147 | * @adev: amdgpu_device pointer | |
451bc8eb CK |
2148 | */ |
2149 | static void amdgpu_vm_prt_get(struct amdgpu_device *adev) | |
2150 | { | |
132f34e4 | 2151 | if (!adev->gmc.gmc_funcs->set_prt) |
4388fc2a CK |
2152 | return; |
2153 | ||
451bc8eb CK |
2154 | if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1) |
2155 | amdgpu_vm_update_prt_state(adev); | |
2156 | } | |
2157 | ||
0b15f2fc CK |
2158 | /** |
2159 | * amdgpu_vm_prt_put - drop a PRT user | |
7fc48e59 AG |
2160 | * |
2161 | * @adev: amdgpu_device pointer | |
0b15f2fc CK |
2162 | */ |
2163 | static void amdgpu_vm_prt_put(struct amdgpu_device *adev) | |
2164 | { | |
451bc8eb | 2165 | if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0) |
0b15f2fc CK |
2166 | amdgpu_vm_update_prt_state(adev); |
2167 | } | |
2168 | ||
284710fa | 2169 | /** |
451bc8eb | 2170 | * amdgpu_vm_prt_cb - callback for updating the PRT status |
7fc48e59 AG |
2171 | * |
2172 | * @fence: fence for the callback | |
00553cf8 | 2173 | * @_cb: the callback function |
284710fa CK |
2174 | */ |
2175 | static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb) | |
2176 | { | |
2177 | struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb); | |
2178 | ||
0b15f2fc | 2179 | amdgpu_vm_prt_put(cb->adev); |
284710fa CK |
2180 | kfree(cb); |
2181 | } | |
2182 | ||
451bc8eb CK |
2183 | /** |
2184 | * amdgpu_vm_add_prt_cb - add callback for updating the PRT status | |
7fc48e59 AG |
2185 | * |
2186 | * @adev: amdgpu_device pointer | |
2187 | * @fence: fence for the callback | |
451bc8eb CK |
2188 | */ |
2189 | static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev, | |
2190 | struct dma_fence *fence) | |
2191 | { | |
4388fc2a | 2192 | struct amdgpu_prt_cb *cb; |
451bc8eb | 2193 | |
132f34e4 | 2194 | if (!adev->gmc.gmc_funcs->set_prt) |
4388fc2a CK |
2195 | return; |
2196 | ||
2197 | cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL); | |
451bc8eb CK |
2198 | if (!cb) { |
2199 | /* Last resort when we are OOM */ | |
2200 | if (fence) | |
2201 | dma_fence_wait(fence, false); | |
2202 | ||
486a68f5 | 2203 | amdgpu_vm_prt_put(adev); |
451bc8eb CK |
2204 | } else { |
2205 | cb->adev = adev; | |
2206 | if (!fence || dma_fence_add_callback(fence, &cb->cb, | |
2207 | amdgpu_vm_prt_cb)) | |
2208 | amdgpu_vm_prt_cb(fence, &cb->cb); | |
2209 | } | |
2210 | } | |
2211 | ||
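/*
 * Reference lifecycle sketch: amdgpu_vm_bo_insert_map() takes a PRT
 * reference for every mapping with AMDGPU_PTE_PRT set, and
 * amdgpu_vm_free_mapping() only drops it through amdgpu_vm_prt_cb()
 * once the unmap fence signals, so set_prt() stays enabled while any
 * PRT PTE may still be live. The kmalloc() failure path above instead
 * waits for the fence synchronously before dropping the reference.
 */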
284710fa CK |
2212 | /** |
2213 | * amdgpu_vm_free_mapping - free a mapping | |
2214 | * | |
2215 | * @adev: amdgpu_device pointer | |
2216 | * @vm: requested vm | |
2217 | * @mapping: mapping to be freed | |
2218 | * @fence: fence of the unmap operation | |
2219 | * | |
2220 | * Free a mapping and make sure we decrease the PRT usage count if applicable. | |
2221 | */ | |
2222 | static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, | |
2223 | struct amdgpu_vm *vm, | |
2224 | struct amdgpu_bo_va_mapping *mapping, | |
2225 | struct dma_fence *fence) | |
2226 | { | |
451bc8eb CK |
2227 | if (mapping->flags & AMDGPU_PTE_PRT) |
2228 | amdgpu_vm_add_prt_cb(adev, fence); | |
2229 | kfree(mapping); | |
2230 | } | |
284710fa | 2231 | |
451bc8eb CK |
2232 | /** |
2233 | * amdgpu_vm_prt_fini - finish all prt mappings | |
2234 | * | |
2235 | * @adev: amdgpu_device pointer | |
2236 | * @vm: requested vm | |
2237 | * | |
2238 | * Register a cleanup callback to disable PRT support after VM dies. | |
2239 | */ | |
2240 | static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |
2241 | { | |
3f3333f8 | 2242 | struct reservation_object *resv = vm->root.base.bo->tbo.resv; |
451bc8eb CK |
2243 | struct dma_fence *excl, **shared; |
2244 | unsigned i, shared_count; | |
2245 | int r; | |
0b15f2fc | 2246 | |
451bc8eb CK |
2247 | r = reservation_object_get_fences_rcu(resv, &excl, |
2248 | &shared_count, &shared); | |
2249 | if (r) { | |
2250 | /* Not enough memory to grab the fence list; as a last resort,
2251 | * block until all the fences complete.
2252 | */ | |
2253 | reservation_object_wait_timeout_rcu(resv, true, false, | |
2254 | MAX_SCHEDULE_TIMEOUT); | |
2255 | return; | |
284710fa | 2256 | } |
451bc8eb CK |
2257 | |
2258 | /* Add a callback for each fence in the reservation object */ | |
2259 | amdgpu_vm_prt_get(adev); | |
2260 | amdgpu_vm_add_prt_cb(adev, excl); | |
2261 | ||
2262 | for (i = 0; i < shared_count; ++i) { | |
2263 | amdgpu_vm_prt_get(adev); | |
2264 | amdgpu_vm_add_prt_cb(adev, shared[i]); | |
2265 | } | |
2266 | ||
2267 | kfree(shared); | |
284710fa CK |
2268 | } |
2269 | ||
d38ceaf9 AD |
2270 | /** |
2271 | * amdgpu_vm_clear_freed - clear freed BOs in the PT | |
2272 | * | |
2273 | * @adev: amdgpu_device pointer | |
2274 | * @vm: requested vm | |
f3467818 NH |
2275 | * @fence: optional resulting fence (unchanged if no work needed to be done |
2276 | * or if an error occurred) | |
d38ceaf9 AD |
2277 | * |
2278 | * Make sure all freed BOs are cleared in the PT. | |
d38ceaf9 | 2279 | * PTs have to be reserved and mutex must be locked! |
7fc48e59 AG |
2280 | * |
2281 | * Returns: | |
2282 | * 0 for success. | |
2283 | * | |
d38ceaf9 AD |
2284 | */ |
2285 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | |
f3467818 NH |
2286 | struct amdgpu_vm *vm, |
2287 | struct dma_fence **fence) | |
d38ceaf9 AD |
2288 | { |
2289 | struct amdgpu_bo_va_mapping *mapping; | |
4584312d | 2290 | uint64_t init_pte_value = 0; |
f3467818 | 2291 | struct dma_fence *f = NULL; |
d38ceaf9 AD |
2292 | int r; |
2293 | ||
2294 | while (!list_empty(&vm->freed)) { | |
2295 | mapping = list_first_entry(&vm->freed, | |
2296 | struct amdgpu_bo_va_mapping, list); | |
2297 | list_del(&mapping->list); | |
e17841b9 | 2298 | |
ad9a5b78 CK |
2299 | if (vm->pte_support_ats && |
2300 | mapping->start < AMDGPU_GMC_HOLE_START) | |
6d16dac8 | 2301 | init_pte_value = AMDGPU_PTE_DEFAULT_ATC; |
51ac7eec | 2302 | |
570144c6 | 2303 | r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm, |
fc6aa33d | 2304 | mapping->start, mapping->last, |
51ac7eec | 2305 | init_pte_value, 0, &f); |
f3467818 | 2306 | amdgpu_vm_free_mapping(adev, vm, mapping, f); |
284710fa | 2307 | if (r) { |
f3467818 | 2308 | dma_fence_put(f); |
d38ceaf9 | 2309 | return r; |
284710fa | 2310 | } |
f3467818 | 2311 | } |
d38ceaf9 | 2312 | |
f3467818 NH |
2313 | if (fence && f) { |
2314 | dma_fence_put(*fence); | |
2315 | *fence = f; | |
2316 | } else { | |
2317 | dma_fence_put(f); | |
d38ceaf9 | 2318 | } |
f3467818 | 2319 | |
d38ceaf9 AD |
2320 | return 0; |
2321 | ||
2322 | } | |
2323 | ||
2324 | /** | |
73fb16e7 | 2325 | * amdgpu_vm_handle_moved - handle moved BOs in the PT |
d38ceaf9 AD |
2326 | * |
2327 | * @adev: amdgpu_device pointer | |
2328 | * @vm: requested vm | |
2329 | * | |
73fb16e7 | 2330 | * Make sure all BOs which are moved are updated in the PTs. |
7fc48e59 AG |
2331 | * |
2332 | * Returns: | |
2333 | * 0 for success. | |
d38ceaf9 | 2334 | * |
73fb16e7 | 2335 | * PTs have to be reserved! |
d38ceaf9 | 2336 | */ |
73fb16e7 | 2337 | int amdgpu_vm_handle_moved(struct amdgpu_device *adev, |
4e55eb38 | 2338 | struct amdgpu_vm *vm) |
d38ceaf9 | 2339 | { |
789f3317 | 2340 | struct amdgpu_bo_va *bo_va, *tmp; |
c12a2ee5 | 2341 | struct reservation_object *resv; |
73fb16e7 | 2342 | bool clear; |
789f3317 | 2343 | int r; |
d38ceaf9 | 2344 | |
c12a2ee5 CK |
2345 | list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { |
2346 | /* Per-VM BOs never need to be cleared in the page tables */
2347 | r = amdgpu_vm_bo_update(adev, bo_va, false); | |
2348 | if (r) | |
2349 | return r; | |
2350 | } | |
32b41ac2 | 2351 | |
c12a2ee5 CK |
2352 | spin_lock(&vm->invalidated_lock); |
2353 | while (!list_empty(&vm->invalidated)) { | |
2354 | bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, | |
2355 | base.vm_status); | |
2356 | resv = bo_va->base.bo->tbo.resv; | |
2357 | spin_unlock(&vm->invalidated_lock); | |
ec363e0d | 2358 | |
ec363e0d | 2359 | /* Try to reserve the BO to avoid clearing its ptes */ |
c12a2ee5 | 2360 | if (!amdgpu_vm_debug && reservation_object_trylock(resv)) |
ec363e0d CK |
2361 | clear = false; |
2362 | /* Somebody else is using the BO right now */ | |
2363 | else | |
2364 | clear = true; | |
73fb16e7 CK |
2365 | |
2366 | r = amdgpu_vm_bo_update(adev, bo_va, clear); | |
c12a2ee5 | 2367 | if (r) |
d38ceaf9 AD |
2368 | return r; |
2369 | ||
c12a2ee5 | 2370 | if (!clear) |
ec363e0d | 2371 | reservation_object_unlock(resv); |
c12a2ee5 | 2372 | spin_lock(&vm->invalidated_lock); |
d38ceaf9 | 2373 | } |
c12a2ee5 | 2374 | spin_unlock(&vm->invalidated_lock); |
d38ceaf9 | 2375 | |
789f3317 | 2376 | return 0; |
d38ceaf9 AD |
2377 | } |
2378 | ||
2379 | /** | |
2380 | * amdgpu_vm_bo_add - add a bo to a specific vm | |
2381 | * | |
2382 | * @adev: amdgpu_device pointer | |
2383 | * @vm: requested vm | |
2384 | * @bo: amdgpu buffer object | |
2385 | * | |
8843dbbb | 2386 | * Add @bo to the list of BOs associated with the requested vm.
7fc48e59 AG |
2388 | * |
2389 | * Returns: | |
2390 | * Newly added bo_va or NULL for failure | |
d38ceaf9 AD |
2391 | * |
2392 | * Object has to be reserved! | |
2393 | */ | |
2394 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | |
2395 | struct amdgpu_vm *vm, | |
2396 | struct amdgpu_bo *bo) | |
2397 | { | |
2398 | struct amdgpu_bo_va *bo_va; | |
2399 | ||
2400 | bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL); | |
2401 | if (bo_va == NULL) { | |
2402 | return NULL; | |
2403 | } | |
3f4299be | 2404 | amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); |
ec681545 | 2405 | |
d38ceaf9 | 2406 | bo_va->ref_count = 1; |
7fc11959 CK |
2407 | INIT_LIST_HEAD(&bo_va->valids); |
2408 | INIT_LIST_HEAD(&bo_va->invalids); | |
32b41ac2 | 2409 | |
d38ceaf9 AD |
2410 | return bo_va; |
2411 | } | |
2412 | ||
73fb16e7 CK |
2413 | |
2414 | /** | |
2415 | * amdgpu_vm_bo_insert_map - insert a new mapping
2416 | * | |
2417 | * @adev: amdgpu_device pointer | |
2418 | * @bo_va: bo_va to store the address | |
2419 | * @mapping: the mapping to insert | |
2420 | * | |
2421 | * Insert a new mapping into all structures. | |
2422 | */ | |
2423 | static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, | |
2424 | struct amdgpu_bo_va *bo_va, | |
2425 | struct amdgpu_bo_va_mapping *mapping) | |
2426 | { | |
2427 | struct amdgpu_vm *vm = bo_va->base.vm; | |
2428 | struct amdgpu_bo *bo = bo_va->base.bo; | |
2429 | ||
aebc5e6f | 2430 | mapping->bo_va = bo_va; |
73fb16e7 CK |
2431 | list_add(&mapping->list, &bo_va->invalids); |
2432 | amdgpu_vm_it_insert(mapping, &vm->va); | |
2433 | ||
2434 | if (mapping->flags & AMDGPU_PTE_PRT) | |
2435 | amdgpu_vm_prt_get(adev); | |
2436 | ||
862b8c57 CK |
2437 | if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv && |
2438 | !bo_va->base.moved) { | |
862b8c57 | 2439 | list_move(&bo_va->base.vm_status, &vm->moved); |
73fb16e7 CK |
2440 | } |
2441 | trace_amdgpu_vm_bo_map(bo_va, mapping); | |
2442 | } | |
2443 | ||
d38ceaf9 AD |
2444 | /** |
2445 | * amdgpu_vm_bo_map - map bo inside a vm | |
2446 | * | |
2447 | * @adev: amdgpu_device pointer | |
2448 | * @bo_va: bo_va to store the address | |
2449 | * @saddr: where to map the BO | |
2450 | * @offset: requested offset in the BO | |
00553cf8 | 2451 | * @size: BO size in bytes |
d38ceaf9 AD |
2452 | * @flags: attributes of pages (read/write/valid/etc.) |
2453 | * | |
2454 | * Add a mapping of the BO at the specified addr into the VM.
7fc48e59 AG |
2455 | * |
2456 | * Returns: | |
2457 | * 0 for success, error for failure. | |
d38ceaf9 | 2458 | * |
49b02b18 | 2459 | * Object has to be reserved and unreserved outside! |
d38ceaf9 AD |
2460 | */ |
2461 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |
2462 | struct amdgpu_bo_va *bo_va, | |
2463 | uint64_t saddr, uint64_t offset, | |
268c3001 | 2464 | uint64_t size, uint64_t flags) |
d38ceaf9 | 2465 | { |
a9f87f64 | 2466 | struct amdgpu_bo_va_mapping *mapping, *tmp; |
ec681545 CK |
2467 | struct amdgpu_bo *bo = bo_va->base.bo; |
2468 | struct amdgpu_vm *vm = bo_va->base.vm; | |
d38ceaf9 | 2469 | uint64_t eaddr; |
d38ceaf9 | 2470 | |
0be52de9 CK |
2471 | /* validate the parameters */ |
2472 | if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || | |
49b02b18 | 2473 | size == 0 || size & AMDGPU_GPU_PAGE_MASK) |
0be52de9 | 2474 | return -EINVAL; |
0be52de9 | 2475 | |
d38ceaf9 | 2476 | /* make sure object fit at this offset */ |
005ae95e | 2477 | eaddr = saddr + size - 1; |
a5f6b5b1 | 2478 | if (saddr >= eaddr || |
ec681545 | 2479 | (bo && offset + size > amdgpu_bo_size(bo))) |
d38ceaf9 | 2480 | return -EINVAL; |
d38ceaf9 | 2481 | |
d38ceaf9 AD |
2482 | saddr /= AMDGPU_GPU_PAGE_SIZE; |
2483 | eaddr /= AMDGPU_GPU_PAGE_SIZE; | |
2484 | ||
a9f87f64 CK |
2485 | tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); |
2486 | if (tmp) { | |
d38ceaf9 AD |
2487 | /* bo and tmp overlap, invalid addr */ |
2488 | dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " | |
ec681545 | 2489 | "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr, |
a9f87f64 | 2490 | tmp->start, tmp->last + 1); |
663e4577 | 2491 | return -EINVAL; |
d38ceaf9 AD |
2492 | } |
2493 | ||
2494 | mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); | |
663e4577 CK |
2495 | if (!mapping) |
2496 | return -ENOMEM; | |
d38ceaf9 | 2497 | |
a9f87f64 CK |
2498 | mapping->start = saddr; |
2499 | mapping->last = eaddr; | |
d38ceaf9 AD |
2500 | mapping->offset = offset; |
2501 | mapping->flags = flags; | |
2502 | ||
73fb16e7 | 2503 | amdgpu_vm_bo_insert_map(adev, bo_va, mapping); |
80f95c57 CK |
2504 | |
2505 | return 0; | |
2506 | } | |
2507 | ||
2508 | /** | |
2509 | * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings | |
2510 | * | |
2511 | * @adev: amdgpu_device pointer | |
2512 | * @bo_va: bo_va to store the address | |
2513 | * @saddr: where to map the BO | |
2514 | * @offset: requested offset in the BO | |
00553cf8 | 2515 | * @size: BO size in bytes |
80f95c57 CK |
2516 | * @flags: attributes of pages (read/write/valid/etc.) |
2517 | * | |
2518 | * Add a mapping of the BO at the specified addr into the VM. Replace existing
2519 | * mappings as we do so. | |
7fc48e59 AG |
2520 | * |
2521 | * Returns: | |
2522 | * 0 for success, error for failure. | |
80f95c57 CK |
2523 | * |
2524 | * Object has to be reserved and unreserved outside! | |
2525 | */ | |
2526 | int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, | |
2527 | struct amdgpu_bo_va *bo_va, | |
2528 | uint64_t saddr, uint64_t offset, | |
2529 | uint64_t size, uint64_t flags) | |
2530 | { | |
2531 | struct amdgpu_bo_va_mapping *mapping; | |
ec681545 | 2532 | struct amdgpu_bo *bo = bo_va->base.bo; |
80f95c57 CK |
2533 | uint64_t eaddr; |
2534 | int r; | |
2535 | ||
2536 | /* validate the parameters */ | |
2537 | if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || | |
2538 | size == 0 || size & AMDGPU_GPU_PAGE_MASK) | |
2539 | return -EINVAL; | |
2540 | ||
2541 | /* make sure object fit at this offset */ | |
2542 | eaddr = saddr + size - 1; | |
2543 | if (saddr >= eaddr || | |
ec681545 | 2544 | (bo && offset + size > amdgpu_bo_size(bo))) |
80f95c57 CK |
2545 | return -EINVAL; |
2546 | ||
2547 | /* Allocate all the needed memory */ | |
2548 | mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); | |
2549 | if (!mapping) | |
2550 | return -ENOMEM; | |
2551 | ||
ec681545 | 2552 | r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); |
80f95c57 CK |
2553 | if (r) { |
2554 | kfree(mapping); | |
2555 | return r; | |
2556 | } | |
2557 | ||
2558 | saddr /= AMDGPU_GPU_PAGE_SIZE; | |
2559 | eaddr /= AMDGPU_GPU_PAGE_SIZE; | |
2560 | ||
a9f87f64 CK |
2561 | mapping->start = saddr; |
2562 | mapping->last = eaddr; | |
80f95c57 CK |
2563 | mapping->offset = offset; |
2564 | mapping->flags = flags; | |
2565 | ||
73fb16e7 | 2566 | amdgpu_vm_bo_insert_map(adev, bo_va, mapping); |
4388fc2a | 2567 | |
d38ceaf9 | 2568 | return 0; |
d38ceaf9 AD |
2569 | } |
2570 | ||
2571 | /** | |
2572 | * amdgpu_vm_bo_unmap - remove bo mapping from vm | |
2573 | * | |
2574 | * @adev: amdgpu_device pointer | |
2575 | * @bo_va: bo_va to remove the address from | |
2576 | * @saddr: where the BO is mapped
2577 | * | |
2578 | * Remove a mapping of the BO at the specified addr from the VM.
7fc48e59 AG |
2579 | * |
2580 | * Returns: | |
2581 | * 0 for success, error for failure. | |
d38ceaf9 | 2582 | * |
49b02b18 | 2583 | * Object has to be reserved and unreserved outside! |
d38ceaf9 AD |
2584 | */ |
2585 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | |
2586 | struct amdgpu_bo_va *bo_va, | |
2587 | uint64_t saddr) | |
2588 | { | |
2589 | struct amdgpu_bo_va_mapping *mapping; | |
ec681545 | 2590 | struct amdgpu_vm *vm = bo_va->base.vm; |
7fc11959 | 2591 | bool valid = true; |
d38ceaf9 | 2592 | |
6c7fc503 | 2593 | saddr /= AMDGPU_GPU_PAGE_SIZE; |
32b41ac2 | 2594 | |
7fc11959 | 2595 | list_for_each_entry(mapping, &bo_va->valids, list) { |
a9f87f64 | 2596 | if (mapping->start == saddr) |
d38ceaf9 AD |
2597 | break; |
2598 | } | |
2599 | ||
7fc11959 CK |
2600 | if (&mapping->list == &bo_va->valids) { |
2601 | valid = false; | |
2602 | ||
2603 | list_for_each_entry(mapping, &bo_va->invalids, list) { | |
a9f87f64 | 2604 | if (mapping->start == saddr) |
7fc11959 CK |
2605 | break; |
2606 | } | |
2607 | ||
32b41ac2 | 2608 | if (&mapping->list == &bo_va->invalids) |
7fc11959 | 2609 | return -ENOENT; |
d38ceaf9 | 2610 | } |
32b41ac2 | 2611 | |
d38ceaf9 | 2612 | list_del(&mapping->list); |
a9f87f64 | 2613 | amdgpu_vm_it_remove(mapping, &vm->va); |
aebc5e6f | 2614 | mapping->bo_va = NULL; |
93e3e438 | 2615 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
d38ceaf9 | 2616 | |
e17841b9 | 2617 | if (valid) |
d38ceaf9 | 2618 | list_add(&mapping->list, &vm->freed); |
e17841b9 | 2619 | else |
284710fa CK |
2620 | amdgpu_vm_free_mapping(adev, vm, mapping, |
2621 | bo_va->last_pt_update); | |
d38ceaf9 AD |
2622 | |
2623 | return 0; | |
2624 | } | |
2625 | ||
dc54d3d1 CK |
2626 | /** |
2627 | * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range | |
2628 | * | |
2629 | * @adev: amdgpu_device pointer | |
2630 | * @vm: VM structure to use | |
2631 | * @saddr: start of the range | |
2632 | * @size: size of the range | |
2633 | * | |
2634 | * Remove all mappings in a range, split them as appropriate. | |
7fc48e59 AG |
2635 | * |
2636 | * Returns: | |
2637 | * 0 for success, error for failure. | |
dc54d3d1 CK |
2638 | */ |
2639 | int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, | |
2640 | struct amdgpu_vm *vm, | |
2641 | uint64_t saddr, uint64_t size) | |
2642 | { | |
2643 | struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; | |
dc54d3d1 CK |
2644 | LIST_HEAD(removed); |
2645 | uint64_t eaddr; | |
2646 | ||
2647 | eaddr = saddr + size - 1; | |
2648 | saddr /= AMDGPU_GPU_PAGE_SIZE; | |
2649 | eaddr /= AMDGPU_GPU_PAGE_SIZE; | |
2650 | ||
2651 | /* Allocate all the needed memory */ | |
2652 | before = kzalloc(sizeof(*before), GFP_KERNEL); | |
2653 | if (!before) | |
2654 | return -ENOMEM; | |
27f6d610 | 2655 | INIT_LIST_HEAD(&before->list); |
dc54d3d1 CK |
2656 | |
2657 | after = kzalloc(sizeof(*after), GFP_KERNEL); | |
2658 | if (!after) { | |
2659 | kfree(before); | |
2660 | return -ENOMEM; | |
2661 | } | |
27f6d610 | 2662 | INIT_LIST_HEAD(&after->list); |
dc54d3d1 CK |
2663 | |
2664 | /* Now gather all removed mappings */ | |
a9f87f64 CK |
2665 | tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); |
2666 | while (tmp) { | |
dc54d3d1 | 2667 | /* Remember mapping split at the start */ |
a9f87f64 CK |
2668 | if (tmp->start < saddr) { |
2669 | before->start = tmp->start; | |
2670 | before->last = saddr - 1; | |
dc54d3d1 CK |
2671 | before->offset = tmp->offset; |
2672 | before->flags = tmp->flags; | |
387f49e5 JZ |
2673 | before->bo_va = tmp->bo_va; |
2674 | list_add(&before->list, &tmp->bo_va->invalids); | |
dc54d3d1 CK |
2675 | } |
2676 | ||
2677 | /* Remember mapping split at the end */ | |
a9f87f64 CK |
2678 | if (tmp->last > eaddr) { |
2679 | after->start = eaddr + 1; | |
2680 | after->last = tmp->last; | |
dc54d3d1 | 2681 | after->offset = tmp->offset; |
a9f87f64 | 2682 | after->offset += after->start - tmp->start; |
dc54d3d1 | 2683 | after->flags = tmp->flags; |
387f49e5 JZ |
2684 | after->bo_va = tmp->bo_va; |
2685 | list_add(&after->list, &tmp->bo_va->invalids); | |
dc54d3d1 CK |
2686 | } |
2687 | ||
2688 | list_del(&tmp->list); | |
2689 | list_add(&tmp->list, &removed); | |
a9f87f64 CK |
2690 | |
2691 | tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr); | |
dc54d3d1 CK |
2692 | } |
2693 | ||
2694 | /* And free them up */ | |
2695 | list_for_each_entry_safe(tmp, next, &removed, list) { | |
a9f87f64 | 2696 | amdgpu_vm_it_remove(tmp, &vm->va); |
dc54d3d1 CK |
2697 | list_del(&tmp->list); |
2698 | ||
a9f87f64 CK |
2699 | if (tmp->start < saddr) |
2700 | tmp->start = saddr; | |
2701 | if (tmp->last > eaddr) | |
2702 | tmp->last = eaddr; | |
dc54d3d1 | 2703 | |
aebc5e6f | 2704 | tmp->bo_va = NULL; |
dc54d3d1 CK |
2705 | list_add(&tmp->list, &vm->freed); |
2706 | trace_amdgpu_vm_bo_unmap(NULL, tmp); | |
2707 | } | |
2708 | ||
27f6d610 JZ |
2709 | /* Insert partial mapping before the range */ |
2710 | if (!list_empty(&before->list)) { | |
a9f87f64 | 2711 | amdgpu_vm_it_insert(before, &vm->va); |
dc54d3d1 CK |
2712 | if (before->flags & AMDGPU_PTE_PRT) |
2713 | amdgpu_vm_prt_get(adev); | |
2714 | } else { | |
2715 | kfree(before); | |
2716 | } | |
2717 | ||
2718 | /* Insert partial mapping after the range */ | |
27f6d610 | 2719 | if (!list_empty(&after->list)) { |
a9f87f64 | 2720 | amdgpu_vm_it_insert(after, &vm->va); |
dc54d3d1 CK |
2721 | if (after->flags & AMDGPU_PTE_PRT) |
2722 | amdgpu_vm_prt_get(adev); | |
2723 | } else { | |
2724 | kfree(after); | |
2725 | } | |
2726 | ||
2727 | return 0; | |
2728 | } | |
2729 | ||
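/*
 * Split sketch with hypothetical GPU page numbers: clearing
 * [0x3000, 0x5000) out of an existing mapping [0x1000, 0x9000):
 */
#if 0
	before->start = 0x1000; before->last = 0x2fff;
	after->start  = 0x5000; after->last  = 0x8fff;
	/* after->offset is advanced by after->start - tmp->start so the
	 * tail keeps pointing at the right part of the BO; the middle
	 * [0x3000, 0x4fff] lands on vm->freed for clearing.
	 */
#endif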
aebc5e6f CK |
2730 | /** |
2731 | * amdgpu_vm_bo_lookup_mapping - find mapping by address | |
2732 | * | |
2733 | * @vm: the requested VM | |
00553cf8 | 2734 | * @addr: the address |
aebc5e6f CK |
2735 | * |
2736 | * Find a mapping by its address.
7fc48e59 AG |
2737 | * |
2738 | * Returns: | |
2739 | * The amdgpu_bo_va_mapping matching for addr or NULL | |
2740 | * | |
aebc5e6f CK |
2741 | */ |
2742 | struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, | |
2743 | uint64_t addr) | |
2744 | { | |
2745 | return amdgpu_vm_it_iter_first(&vm->va, addr, addr); | |
2746 | } | |
2747 | ||
8ab19ea6 CK |
2748 | /** |
2749 | * amdgpu_vm_bo_trace_cs - trace all reserved mappings | |
2750 | * | |
2751 | * @vm: the requested vm | |
2752 | * @ticket: CS ticket | |
2753 | * | |
2754 | * Trace all mappings of BOs reserved during a command submission. | |
2755 | */ | |
2756 | void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) | |
2757 | { | |
2758 | struct amdgpu_bo_va_mapping *mapping; | |
2759 | ||
2760 | if (!trace_amdgpu_vm_bo_cs_enabled()) | |
2761 | return; | |
2762 | ||
2763 | for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping; | |
2764 | mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) { | |
2765 | if (mapping->bo_va && mapping->bo_va->base.bo) { | |
2766 | struct amdgpu_bo *bo; | |
2767 | ||
2768 | bo = mapping->bo_va->base.bo; | |
2769 | if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket) | |
2770 | continue; | |
2771 | } | |
2772 | ||
2773 | trace_amdgpu_vm_bo_cs(mapping); | |
2774 | } | |
2775 | } | |
2776 | ||
d38ceaf9 AD |
2777 | /** |
2778 | * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2779 | * | |
2780 | * @adev: amdgpu_device pointer | |
2781 | * @bo_va: requested bo_va | |
2782 | * | |
8843dbbb | 2783 | * Remove @bo_va->bo from the requested vm. |
d38ceaf9 AD |
2784 | * |
2785 | * Object has to be reserved!
2786 | */ | |
2787 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | |
2788 | struct amdgpu_bo_va *bo_va) | |
2789 | { | |
2790 | struct amdgpu_bo_va_mapping *mapping, *next; | |
fbbf794c | 2791 | struct amdgpu_bo *bo = bo_va->base.bo; |
ec681545 | 2792 | struct amdgpu_vm *vm = bo_va->base.vm; |
646b9025 | 2793 | struct amdgpu_vm_bo_base **base; |
d38ceaf9 | 2794 | |
646b9025 CK |
2795 | if (bo) { |
2796 | if (bo->tbo.resv == vm->root.base.bo->tbo.resv) | |
2797 | vm->bulk_moveable = false; | |
fbbf794c | 2798 | |
646b9025 CK |
2799 | for (base = &bo_va->base.bo->vm_bo; *base; |
2800 | base = &(*base)->next) { | |
2801 | if (*base != &bo_va->base) | |
2802 | continue; | |
2803 | ||
2804 | *base = bo_va->base.next; | |
2805 | break; | |
2806 | } | |
2807 | } | |
d38ceaf9 | 2808 | |
c12a2ee5 | 2809 | spin_lock(&vm->invalidated_lock); |
ec681545 | 2810 | list_del(&bo_va->base.vm_status); |
c12a2ee5 | 2811 | spin_unlock(&vm->invalidated_lock); |
d38ceaf9 | 2812 | |
7fc11959 | 2813 | list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { |
d38ceaf9 | 2814 | list_del(&mapping->list); |
a9f87f64 | 2815 | amdgpu_vm_it_remove(mapping, &vm->va); |
aebc5e6f | 2816 | mapping->bo_va = NULL; |
93e3e438 | 2817 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
7fc11959 CK |
2818 | list_add(&mapping->list, &vm->freed); |
2819 | } | |
2820 | list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { | |
2821 | list_del(&mapping->list); | |
a9f87f64 | 2822 | amdgpu_vm_it_remove(mapping, &vm->va); |
284710fa CK |
2823 | amdgpu_vm_free_mapping(adev, vm, mapping, |
2824 | bo_va->last_pt_update); | |
d38ceaf9 | 2825 | } |
32b41ac2 | 2826 | |
f54d1867 | 2827 | dma_fence_put(bo_va->last_pt_update); |
d38ceaf9 | 2828 | kfree(bo_va); |
d38ceaf9 AD |
2829 | } |
2830 | ||
2831 | /** | |
2832 | * amdgpu_vm_bo_invalidate - mark the bo as invalid | |
2833 | * | |
2834 | * @adev: amdgpu_device pointer | |
d38ceaf9 | 2835 | * @bo: amdgpu buffer object |
00553cf8 | 2836 | * @evicted: is the BO evicted |
d38ceaf9 | 2837 | * |
8843dbbb | 2838 | * Mark @bo as invalid. |
d38ceaf9 AD |
2839 | */ |
2840 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | |
3f3333f8 | 2841 | struct amdgpu_bo *bo, bool evicted) |
d38ceaf9 | 2842 | { |
ec681545 CK |
2843 | struct amdgpu_vm_bo_base *bo_base; |
2844 | ||
4bebccee CZ |
2845 | /* shadow bo doesn't have bo base, its validation needs its parent */ |
2846 | if (bo->parent && bo->parent->shadow == bo) | |
2847 | bo = bo->parent; | |
2848 | ||
646b9025 | 2849 | for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { |
3f3333f8 CK |
2850 | struct amdgpu_vm *vm = bo_base->vm; |
2851 | ||
3f3333f8 | 2852 | if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) { |
bcdc9fd6 | 2853 | amdgpu_vm_bo_evicted(bo_base); |
3f3333f8 CK |
2854 | continue; |
2855 | } | |
2856 | ||
bcdc9fd6 | 2857 | if (bo_base->moved) |
3f3333f8 | 2858 | continue; |
bcdc9fd6 | 2859 | bo_base->moved = true; |
3f3333f8 | 2860 | |
bcdc9fd6 CK |
2861 | if (bo->tbo.type == ttm_bo_type_kernel) |
2862 | amdgpu_vm_bo_relocated(bo_base); | |
2863 | else if (bo->tbo.resv == vm->root.base.bo->tbo.resv) | |
2864 | amdgpu_vm_bo_moved(bo_base); | |
2865 | else | |
2866 | amdgpu_vm_bo_invalidated(bo_base); | |
d38ceaf9 AD |
2867 | } |
2868 | } | |
2869 | ||
7fc48e59 AG |
2870 | /** |
2871 | * amdgpu_vm_get_block_size - calculate VM page table size as power of two | |
2872 | * | |
2873 | * @vm_size: VM size | |
2874 | * | |
2875 | * Returns: | |
2876 | * VM page table block size as a power of two
2877 | */ | |
bab4fee7 JZ |
2878 | static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) |
2879 | { | |
2880 | /* Total bits covered by PD + PTs */ | |
2881 | unsigned bits = ilog2(vm_size) + 18; | |
2882 | ||
2883 | /* Make sure the PD is 4K in size for up to 8GB of address space.
2884 | * Above that, split the bits equally between PD and PTs. */
2885 | if (vm_size <= 8) | |
2886 | return (bits - 9); | |
2887 | else | |
2888 | return ((bits + 3) / 2); | |
2889 | } | |
2890 | ||
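/*
 * Worked examples for the formula above: vm_size = 8 GB gives
 * bits = ilog2(8) + 18 = 21 and a block size of 21 - 9 = 12, leaving
 * 9 bits for the directory (a 4K PD); vm_size = 64 GB gives bits = 24
 * and (24 + 3) / 2 = 13 bits of PT against 11 bits of PD.
 */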
d07f14be RH |
2891 | /** |
2892 | * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size | |
bab4fee7 JZ |
2893 | * |
2894 | * @adev: amdgpu_device pointer | |
43370c4c | 2895 | * @min_vm_size: the minimum vm size in GB when the size is chosen automatically
00553cf8 AG |
2896 | * @fragment_size_default: Default PTE fragment size |
2897 | * @max_level: max VMPT level | |
2898 | * @max_bits: max address space size in bits | |
2899 | * | |
bab4fee7 | 2900 | */ |
43370c4c | 2901 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
f3368128 CK |
2902 | uint32_t fragment_size_default, unsigned max_level, |
2903 | unsigned max_bits) | |
bab4fee7 | 2904 | { |
43370c4c FK |
2905 | unsigned int max_size = 1 << (max_bits - 30); |
2906 | unsigned int vm_size; | |
36539dce CK |
2907 | uint64_t tmp; |
2908 | ||
2909 | /* adjust vm size first */ | |
f3368128 | 2910 | if (amdgpu_vm_size != -1) { |
fdd5faaa | 2911 | vm_size = amdgpu_vm_size; |
f3368128 CK |
2912 | if (vm_size > max_size) { |
2913 | dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", | |
2914 | amdgpu_vm_size, max_size); | |
2915 | vm_size = max_size; | |
2916 | } | |
43370c4c FK |
2917 | } else { |
2918 | struct sysinfo si; | |
2919 | unsigned int phys_ram_gb; | |
2920 | ||
2921 | /* Optimal VM size depends on the amount of physical | |
2922 | * RAM available. Underlying requirements and | |
2923 | * assumptions: | |
2924 | * | |
2925 | * - Need to map system memory and VRAM from all GPUs | |
2926 | * - VRAM from other GPUs not known here | |
2927 | * - Assume VRAM <= system memory | |
2928 | * - On GFX8 and older, VM space can be segmented for | |
2929 | * different MTYPEs | |
2930 | * - Need to allow room for fragmentation, guard pages etc. | |
2931 | * | |
2932 | * This adds up to a rough guess of system memory x3. | |
2933 | * Round up to power of two to maximize the available | |
2934 | * VM size with the given page table size. | |
2935 | */ | |
2936 | si_meminfo(&si); | |
2937 | phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + | |
2938 | (1 << 30) - 1) >> 30; | |
2939 | vm_size = roundup_pow_of_two( | |
2940 | min(max(phys_ram_gb * 3, min_vm_size), max_size)); | |
f3368128 | 2941 | } |
fdd5faaa CK |
2942 | |
2943 | adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; | |
36539dce CK |
2944 | |
2945 | tmp = roundup_pow_of_two(adev->vm_manager.max_pfn); | |
97489129 CK |
2946 | if (amdgpu_vm_block_size != -1) |
2947 | tmp >>= amdgpu_vm_block_size - 9; | |
36539dce CK |
2948 | tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1; |
2949 | adev->vm_manager.num_level = min(max_level, (unsigned)tmp); | |
196f7489 CZ |
2950 | switch (adev->vm_manager.num_level) { |
2951 | case 3: | |
2952 | adev->vm_manager.root_level = AMDGPU_VM_PDB2; | |
2953 | break; | |
2954 | case 2: | |
2955 | adev->vm_manager.root_level = AMDGPU_VM_PDB1; | |
2956 | break; | |
2957 | case 1: | |
2958 | adev->vm_manager.root_level = AMDGPU_VM_PDB0; | |
2959 | break; | |
2960 | default: | |
2961 | dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n"); | |
2962 | } | |
b38f41eb | 2963 | /* block size depends on vm size and hw setup */
97489129 | 2964 | if (amdgpu_vm_block_size != -1) |
bab4fee7 | 2965 | adev->vm_manager.block_size = |
97489129 CK |
2966 | min((unsigned)amdgpu_vm_block_size, max_bits |
2967 | - AMDGPU_GPU_PAGE_SHIFT | |
2968 | - 9 * adev->vm_manager.num_level); | |
2969 | else if (adev->vm_manager.num_level > 1) | |
2970 | adev->vm_manager.block_size = 9; | |
bab4fee7 | 2971 | else |
97489129 | 2972 | adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp); |
bab4fee7 | 2973 | |
b38f41eb CK |
2974 | if (amdgpu_vm_fragment_size == -1) |
2975 | adev->vm_manager.fragment_size = fragment_size_default; | |
2976 | else | |
2977 | adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; | |
d07f14be | 2978 | |
36539dce CK |
2979 | DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", |
2980 | vm_size, adev->vm_manager.num_level + 1, | |
2981 | adev->vm_manager.block_size, | |
fdd5faaa | 2982 | adev->vm_manager.fragment_size); |
bab4fee7 JZ |
2983 | } |
2984 | ||
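/*
 * End-to-end sketch with assumed inputs (16 GB of system RAM,
 * amdgpu_vm_size == -1, amdgpu_vm_block_size == -1, clamps not hit):
 * phys_ram_gb = 16 makes vm_size = roundup_pow_of_two(3 * 16) = 64 GB,
 * so max_pfn = 64ULL << 18 = 2^24. fls64(2^24) - 1 = 24 then yields
 * DIV_ROUND_UP(24, 9) - 1 = 2 levels (before the max_level clamp) and
 * the default 9-bit block size for the multi-level case.
 */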
240cd9a6 OZ |
2985 | static struct amdgpu_retryfault_hashtable *init_fault_hash(void) |
2986 | { | |
2987 | struct amdgpu_retryfault_hashtable *fault_hash; | |
2988 | ||
2989 | fault_hash = kmalloc(sizeof(*fault_hash), GFP_KERNEL); | |
2990 | if (!fault_hash) | |
2991 | return fault_hash; | |
2992 | ||
2993 | INIT_CHASH_TABLE(fault_hash->hash, | |
2994 | AMDGPU_PAGEFAULT_HASH_BITS, 8, 0); | |
2995 | spin_lock_init(&fault_hash->lock); | |
2996 | fault_hash->count = 0; | |
2997 | ||
2998 | return fault_hash; | |
2999 | } | |
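
/*
 * Editor's note: the INIT_CHASH_TABLE arguments are read here as
 * 2^AMDGPU_PAGEFAULT_HASH_BITS slots, 8-byte keys and no value payload
 * (the trailing 0) -- an assumption based on the chash API, since only
 * membership of the fault key matters to this table.
 */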

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: Indicates whether it is a GFX or Compute context
 * @pasid: Process address space identifier
 *
 * Init @vm fields.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid)
{
	struct amdgpu_bo_param bp;
	struct amdgpu_bo *root;
	int r, i;

	vm->va = RB_ROOT_CACHED;
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	INIT_LIST_HEAD(&vm->evicted);
	INIT_LIST_HEAD(&vm->relocated);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->idle);
	INIT_LIST_HEAD(&vm->invalidated);
	spin_lock_init(&vm->invalidated_lock);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entity for page table updates */
	r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
				  adev->vm_manager.vm_pte_num_rqs, NULL);
	if (r)
		return r;

	vm->pte_support_ats = false;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_COMPUTE);

		if (adev->asic_type == CHIP_RAVEN)
			vm->pte_support_ats = true;
	} else {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_GFX);
	}
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");
	vm->last_update = NULL;

	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
		bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_create(adev, &bp, &root);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(root, true);
	if (r)
		goto error_free_root;

	r = reservation_object_reserve_shared(root->tbo.resv, 1);
	if (r)
		goto error_unreserve;

	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);

	r = amdgpu_vm_clear_bo(adev, vm, root);
	if (r)
		goto error_unreserve;

	amdgpu_bo_unreserve(vm->root.base.bo);

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
		if (r < 0)
			goto error_free_root;

		vm->pasid = pasid;
	}

	vm->fault_hash = init_fault_hash();
	if (!vm->fault_hash) {
		r = -ENOMEM;
		goto error_free_root;
	}

	INIT_KFIFO(vm->faults);

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(vm->root.base.bo);

error_free_root:
	amdgpu_bo_unref(&vm->root.base.bo->shadow);
	amdgpu_bo_unref(&vm->root.base.bo);
	vm->root.base.bo = NULL;

error_free_sched_entity:
	drm_sched_entity_destroy(&vm->entity);

	return r;
}
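
/*
 * Example usage (editor's sketch; the surrounding fpriv allocation and
 * cleanup label are hypothetical, modeled on a typical driver open path):
 *
 *	struct amdgpu_fpriv *fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
 *	int r;
 *
 *	if (!fpriv)
 *		return -ENOMEM;
 *	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, 0);
 *	if (r)
 *		goto error_free;
 *
 * Passing pasid == 0 skips the pasid_idr registration; the matching
 * teardown is amdgpu_vm_fini() below.
 */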

/**
 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @pasid: pasid the converted compute VM should use
 *
 * This only works on GFX VMs that don't have any BOs added and no
 * page tables allocated yet.
 *
 * Changes the following VM parameters:
 * - use_cpu_for_update
 * - pte_support_ats
 * - pasid (old PASID is released, because compute manages its own PASIDs)
 *
 * Reinitializes the page directory to reflect the changed ATS
 * setting.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
{
	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
	int r;

	r = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (r)
		return r;

	/* Sanity checks */
	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
		r = -EINVAL;
		goto unreserve_bo;
	}

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

		if (r == -ENOSPC)
			goto unreserve_bo;
		r = 0;
	}

	/* Check if PD needs to be reinitialized and do it before
	 * changing any other state, in case it fails.
	 */
	if (pte_support_ats != vm->pte_support_ats) {
		vm->pte_support_ats = pte_support_ats;
		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo);
		if (r)
			goto free_idr;
	}

	/* Update VM state */
	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

		/* Free the original amdgpu-allocated pasid;
		 * it will be replaced with a kfd-allocated pasid.
		 */
		amdgpu_pasid_free(vm->pasid);
		vm->pasid = 0;
	}

	/* Free the shadow bo for compute VM */
	amdgpu_bo_unref(&vm->root.base.bo->shadow);

	if (pasid)
		vm->pasid = pasid;

	goto unreserve_bo;

free_idr:
	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}
unreserve_bo:
	amdgpu_bo_unreserve(vm->root.base.bo);
	return r;
}
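
/*
 * Typical flow (editor's sketch; the KFD entry point is an assumption):
 * the VM is created as a GFX VM via amdgpu_vm_init() and later handed
 * to the KFD, which converts it with its own pasid:
 *
 *	r = amdgpu_vm_make_compute(adev, vm, kfd_pasid);
 *	if (r)
 *		return r;	// on failure the VM stays a GFX VM
 */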

/**
 * amdgpu_vm_release_compute - release a compute vm
 * @adev: amdgpu_device pointer
 * @vm: a vm turned into a compute vm by calling amdgpu_vm_make_compute
 *
 * This is the counterpart of amdgpu_vm_make_compute. It decouples the
 * compute pasid from the vm. Compute should stop using the vm after this call.
 */
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}
	vm->pasid = 0;
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
	struct amdgpu_bo *root;
	u64 fault;
	int i, r;

	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

	/* Clear pending page faults from IH when the VM is destroyed */
	while (kfifo_get(&vm->faults, &fault))
		amdgpu_vm_clear_fault(vm->fault_hash, fault);

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}

	kfree(vm->fault_hash);
	vm->fault_hash = NULL;

	drm_sched_entity_destroy(&vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp,
					     &vm->va.rb_root, rb) {
		/* Don't remove the mapping here, we don't want to trigger a
		 * rebalance and the tree is about to be destroyed anyway.
		 */
		list_del(&mapping->list);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	root = amdgpu_bo_ref(vm->root.base.bo);
	r = amdgpu_bo_reserve(root, true);
	if (r) {
		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
	} else {
		amdgpu_vm_free_pts(adev, vm);
		amdgpu_bo_unreserve(root);
	}
	amdgpu_bo_unref(&root);
	dma_fence_put(vm->last_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vmid_free_reserved(adev, vm, i);
}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	amdgpu_vmid_mgr_init(adev);

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* Unless overridden by the user, compute VM page tables are
	 * updated by the CPU only on large BAR systems.
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif

	idr_init(&adev->vm_manager.pasid_idr);
	spin_lock_init(&adev->vm_manager.pasid_lock);
}
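
/*
 * Editor's note: per the module parameter description, the
 * amdgpu_vm_update_mode override is interpreted as 0 = never use CPU
 * updates, 1 = GFX only, 2 = compute only, 3 = both; e.g. booting with
 * amdgpu.vm_update_mode=3 requests CPU page table updates for GFX and
 * compute VMs alike.
 */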

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
	idr_destroy(&adev->vm_manager.pasid_idr);

	amdgpu_vmid_mgr_fini(adev);
}

/**
 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
 *
 * @dev: drm device pointer
 * @data: drm_amdgpu_vm
 * @filp: drm file pointer
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently, we only need to reserve a vmid from the gfxhub */
		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
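
/*
 * Userspace side (editor's sketch, assuming the amdgpu_drm.h uAPI and
 * libdrm's drmCommandWriteRead; error handling elided):
 *
 *	union drm_amdgpu_vm args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */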

/**
 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
 *
 * @adev: amdgpu device pointer
 * @pasid: PASID identifier for VM
 * @task_info: task_info to fill.
 */
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
			     struct amdgpu_task_info *task_info)
{
	struct amdgpu_vm *vm;
	unsigned long flags;

	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);

	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (vm)
		*task_info = vm->task_info;

	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
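
/*
 * Example consumer (editor's sketch): a GPU fault handler wanting to
 * attribute a fault to a process, assuming the PASID comes from the
 * interrupt vector entry:
 *
 *	struct amdgpu_task_info task_info;
 *
 *	memset(&task_info, 0, sizeof(task_info));
 *	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
 *	DRM_ERROR("VM fault in process %s (pid %d)\n",
 *		  task_info.process_name, task_info.pid);
 */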

/**
 * amdgpu_vm_set_task_info - Sets VMs task info.
 *
 * @vm: vm for which to set the info
 */
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
{
	if (!vm->task_info.pid) {
		vm->task_info.pid = current->pid;
		get_task_comm(vm->task_info.task_name, current);

		if (current->group_leader->mm == current->mm) {
			vm->task_info.tgid = current->group_leader->pid;
			get_task_comm(vm->task_info.process_name, current->group_leader);
		}
	}
}
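
/*
 * Editor's note: the !vm->task_info.pid guard makes repeated calls
 * cheap, so a caller on a hot path (e.g. command submission) can simply
 * do:
 *
 *	amdgpu_vm_set_task_info(vm);
 *
 * and the pid and task names are snapshotted only on the first call.
 */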

/**
 * amdgpu_vm_add_fault - Add a page fault record to fault hash table
 *
 * @fault_hash: fault hash table
 * @key: 64-bit encoding of PASID and address
 *
 * This should be called when a retry page fault interrupt is
 * received. If this is a new page fault, it will be added to a hash
 * table. The return value indicates whether this is a new fault, or
 * a fault that was already known and is already being handled.
 *
 * If there are too many pending page faults, this will fail. Retry
 * interrupts should be ignored in this case until there is enough
 * free space.
 *
 * Returns 0 if the fault was added, 1 if the fault was already known,
 * -ENOSPC if there are too many pending faults.
 */
int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
{
	unsigned long flags;
	int r = -ENOSPC;

	if (WARN_ON_ONCE(!fault_hash))
		/* Should be allocated in amdgpu_vm_init */
		return r;

	spin_lock_irqsave(&fault_hash->lock, flags);

	/* Only let the hash table fill up to 50% for best performance */
	if (fault_hash->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS - 1)))
		goto unlock_out;

	r = chash_table_copy_in(&fault_hash->hash, key, NULL);
	if (!r)
		fault_hash->count++;

	/* chash_table_copy_in should never fail unless we're losing count */
	WARN_ON_ONCE(r < 0);

unlock_out:
	spin_unlock_irqrestore(&fault_hash->lock, flags);
	return r;
}

/**
 * amdgpu_vm_clear_fault - Remove a page fault record
 *
 * @fault_hash: fault hash table
 * @key: 64-bit encoding of PASID and address
 *
 * This should be called when a page fault has been handled. Any
 * future interrupt with this key will be processed as a new
 * page fault.
 */
void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
{
	unsigned long flags;
	int r;

	if (!fault_hash)
		return;

	spin_lock_irqsave(&fault_hash->lock, flags);

	r = chash_table_remove(&fault_hash->hash, key, NULL);
	if (!WARN_ON_ONCE(r < 0)) {
		fault_hash->count--;
		WARN_ON_ONCE(fault_hash->count < 0);
	}

	spin_unlock_irqrestore(&fault_hash->lock, flags);
}
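
/*
 * Fault-tracking lifecycle (editor's sketch; the key encoding shown is
 * an assumption -- the real encoding is owned by the interrupt handler
 * that produces @key):
 *
 *	u64 key = ((u64)pasid << 48) | (addr & ~0xfffULL);
 *
 *	if (amdgpu_vm_add_fault(vm->fault_hash, key))
 *		return;			// already known or table full
 *	kfifo_put(&vm->faults, key);	// drained in amdgpu_vm_fini()
 *	...handle the fault...
 *	amdgpu_vm_clear_fault(vm->fault_hash, key);
 */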