/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy GART on older ASICs; however, rather
 * than there being a single global GART table for the entire GPU,
 * there are multiple VM page tables active at any given time.
 * The VM page tables can contain a mix of VRAM pages and system memory
 * pages, and system memory pages can be mapped as snooped (cached
 * system pages) or unsnooped (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

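/*
 * Illustrative sketch only (not part of the original driver): how the
 * two-level layout described above splits a GPU page frame number into
 * a page directory index and a page table index. The helper name is
 * hypothetical; the math mirrors amdgpu_vm_update_ptes() below.
 */
static inline void amdgpu_vm_example_split_pfn(uint64_t pfn,
					       uint64_t *pde_idx,
					       uint64_t *pte_idx)
{
	/* each page table covers (1 << amdgpu_vm_block_size) pages */
	*pde_idx = pfn >> amdgpu_vm_block_size;
	/* entry within that page table */
	*pte_idx = pfn & (AMDGPU_VM_PTE_COUNT - 1);
}
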
/* Special value indicating that no flush is necessary */
#define AMDGPU_VM_NO_FLUSH (~0ll)

/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries.
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes.
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}

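/*
 * Worked example (illustrative, assuming a vm_block_size of 9 bits and
 * max_pfn = 1 << 20, i.e. 4 GiB of 4 KiB pages): amdgpu_vm_num_pdes()
 * returns (1 << 20) >> 9 = 2048 directory entries, and
 * amdgpu_vm_directory_size() returns 2048 * 8 bytes = 16 KiB.
 */
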
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->page_directory;
	entry->priority = 0;
	entry->tv.bo = &vm->page_directory->tbo;
	entry->tv.shared = true;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_get_pt_bos - add the vm page table BOs to a duplicates list
 *
 * @vm: vm providing the BOs
 * @duplicates: head of duplicates list
 *
 * Add the page tables to the BO duplicates list
 * for command submission.
 */
void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
{
	unsigned i;

	/* add the vm page tables to the list */
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		list_add(&entry->tv.head, duplicates);
	}
}

/**
 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 *
 * @adev: amdgpu device instance
 * @vm: vm providing the BOs
 *
 * Move the PT BOs to the tail of the LRU.
 */
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
	}
	spin_unlock(&glob->lru_lock);
}

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @vm_id: resulting VMID
 * @vm_pd_addr: resulting page directory address, or AMDGPU_VM_NO_FLUSH
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence,
		      unsigned *vm_id, uint64_t *vm_pd_addr)
{
	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vm_id *id = &vm->ids[ring->idx];
	struct fence *updates = sync->last_vm_update;
	int r;

	mutex_lock(&adev->vm_manager.lock);

	/* check if the id is still valid */
	if (id->mgr_id) {
		struct fence *flushed = id->flushed_updates;
		bool is_later;
		long owner;

		if (!flushed)
			is_later = true;
		else if (!updates)
			is_later = false;
		else
			is_later = fence_is_later(updates, flushed);

		owner = atomic_long_read(&id->mgr_id->owner);
		if (!is_later && owner == (long)id &&
		    pd_addr == id->pd_gpu_addr) {

			r = amdgpu_sync_fence(ring->adev, sync,
					      id->mgr_id->active);
			if (r) {
				mutex_unlock(&adev->vm_manager.lock);
				return r;
			}

			fence_put(id->mgr_id->active);
			id->mgr_id->active = fence_get(fence);

			list_move_tail(&id->mgr_id->list,
				       &adev->vm_manager.ids_lru);

			*vm_id = id->mgr_id - adev->vm_manager.ids;
			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
			trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
						*vm_pd_addr);

			mutex_unlock(&adev->vm_manager.lock);
			return 0;
		}
	}

	id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
				      struct amdgpu_vm_manager_id,
				      list);

	r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
	if (!r) {
		fence_put(id->mgr_id->active);
		id->mgr_id->active = fence_get(fence);

		fence_put(id->flushed_updates);
		id->flushed_updates = fence_get(updates);

		id->pd_gpu_addr = pd_addr;

		list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
		atomic_long_set(&id->mgr_id->owner, (long)id);

		*vm_id = id->mgr_id - adev->vm_manager.ids;
		*vm_pd_addr = pd_addr;
		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
	}

	mutex_unlock(&adev->vm_manager.lock);
	return r;
}
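
/*
 * Reuse summary for amdgpu_vm_grab_id(): the flush is skipped only when
 * the manager ID is still owned by this VM's ID, the page directory
 * address is unchanged, and no page table updates newer than the last
 * flushed ones exist; otherwise the least recently used ID is taken
 * from the LRU list and *vm_pd_addr requests a flush on submission.
 */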

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @vm_id: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Emit a VM flush when it is necessary.
 */
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     unsigned vm_id, uint64_t pd_addr,
		     uint32_t gds_base, uint32_t gds_size,
		     uint32_t gws_base, uint32_t gws_size,
		     uint32_t oa_base, uint32_t oa_size)
{
	if (pd_addr != AMDGPU_VM_NO_FLUSH) {
		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
		amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
	}

	if (ring->funcs->emit_gds_switch)
		amdgpu_ring_emit_gds_switch(ring, vm_id,
					    gds_base, gds_size,
					    gws_base, gws_size,
					    oa_base, oa_size);
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: GTT hw access flags
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
				   struct amdgpu_gart *gtt,
				   uint32_t gtt_flags,
				   struct amdgpu_ib *ib,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint32_t flags)
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
		uint64_t src = gtt->table_addr + (addr >> 12) * 8;
		amdgpu_vm_copy_pte(adev, ib, pe, src, count);

	} else if (gtt) {
		dma_addr_t *pages_addr = gtt->pages_addr;
		amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
				    count, incr, flags);

	} else if (count < 3) {
		amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
				    count, incr, flags);

	} else {
		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
				      count, incr, flags);
	}
}
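
/*
 * Dispatch summary for amdgpu_vm_update_pages(): GART mappings whose
 * flags match the GART's own are copied straight from the GART table
 * (amdgpu_vm_copy_pte); other GART mappings are written with a per-page
 * DMA address lookup (amdgpu_vm_write_pte); linear runs of fewer than
 * three entries are written directly, while longer ones use the more
 * compact amdgpu_vm_set_pte_pde.
 */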

/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: bo to clear
 *
 * The bo needs to be reserved before calling this function.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_bo *bo)
{
	struct amdgpu_ring *ring;
	struct fence *fence = NULL;
	struct amdgpu_job *job;
	unsigned entries;
	uint64_t addr;
	int r;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto error;

	amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
			       0, 0);
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);

	WARN_ON(job->ibs[0].length_dw > 64);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &fence);
	if (r)
		goto error_free;

	amdgpu_bo_fence(bo, fence, true);
	fence_put(fence);
	return 0;

error_free:
	amdgpu_job_free(job);

error:
	return r;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	if (pages_addr) {
		/* page table offset */
		result = pages_addr[addr >> PAGE_SHIFT];

		/* in case cpu page size != gpu page size */
		result |= addr & (~PAGE_MASK);

	} else {
		/* No mapping required */
		result = addr;
	}

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
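
/*
 * Worked example (illustrative): with 64 KiB CPU pages (PAGE_SHIFT == 16)
 * and addr == 0x12000, pages_addr[0x1] yields the DMA address of the
 * CPU page, the OR merges in the 0x2000 offset so the right 4 KiB GPU
 * page within it is selected, and the final mask clears the low 12 bits
 * to keep the result PTE-aligned.
 */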

/**
 * amdgpu_vm_update_page_directory - make sure that the page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdgpu_ring *ring;
	struct amdgpu_bo *pd = vm->page_directory;
	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *fence = NULL;

	int r;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = amdgpu_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				amdgpu_vm_update_pages(adev, NULL, 0, ib,
						       last_pde, last_pt,
						       count, incr,
						       AMDGPU_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
				       count, incr, AMDGPU_PTE_VALID);

	if (ib->length_dw != 0) {
		amdgpu_ring_pad_ib(ring, ib);
		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM);
		WARN_ON(ib->length_dw > ndw);
		r = amdgpu_job_submit(job, ring, &vm->entity,
				      AMDGPU_FENCE_OWNER_VM, &fence);
		if (r)
			goto error_free;

		amdgpu_bo_fence(pd, fence, true);
		fence_put(vm->page_directory_fence);
		vm->page_directory_fence = fence_get(fence);
		fence_put(fence);

	} else {
		amdgpu_job_free(job);
	}

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: GTT hw mapping flags
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
				struct amdgpu_gart *gtt,
				uint32_t gtt_flags,
				struct amdgpu_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */

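	/*
	 * Worked example (illustrative): the frag_align of 0x80 bytes used
	 * below covers 0x80 / 8 = 16 PTEs, i.e. 16 * 4 KiB = 64 KiB of
	 * address space per fragment, matching AMDGPU_PTE_FRAG_64KB.
	 */
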
	/* SI and newer are optimized for 64KB */
	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
	uint64_t frag_align = 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* Abort early if there isn't anything to do */
	if (pe_start == pe_end)
		return;

	/* system pages are not continuous */
	if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
				       addr, count, AMDGPU_GPU_PAGE_SIZE,
				       flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
				       count, AMDGPU_GPU_PAGE_SIZE, flags);
		addr += AMDGPU_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += AMDGPU_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
				       count, AMDGPU_GPU_PAGE_SIZE, flags);
	}
}

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: GTT hw mapping flags
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 */
static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
				  struct amdgpu_gart *gtt,
				  uint32_t gtt_flags,
				  struct amdgpu_vm *vm,
				  struct amdgpu_ib *ib,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;

	uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
	uint64_t addr;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
		struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
		unsigned nptes;
		uint64_t pe_start;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

		pe_start = amdgpu_bo_gpu_offset(pt);
		pe_start += (addr & mask) * 8;

		if (last_pe_end != pe_start) {

			amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
					    last_pe_start, last_pe_end,
					    last_dst, flags);

			last_pe_start = pe_start;
			last_pe_end = pe_start + 8 * nptes;
			last_dst = dst;
		} else {
			last_pe_end += 8 * nptes;
		}

		addr += nptes;
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

	amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
			    last_pe_start, last_pe_end,
			    last_dst, flags);
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: flags as they are used for GTT
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct amdgpu_gart *gtt,
				       uint32_t gtt_flags,
				       struct amdgpu_vm *vm,
				       uint64_t start, uint64_t last,
				       uint32_t flags, uint64_t addr,
				       struct fence **fence)
{
	struct amdgpu_ring *ring;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	int r;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	nptes = last - start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;

	/* padding, etc. */
	ndw = 64;

	if ((gtt == &adev->gart) && (flags == gtt_flags)) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (gtt) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
			     owner);
	if (r)
		goto error_free;

	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
	if (r)
		goto error_free;

	amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
			      addr, flags);

	amdgpu_ring_pad_ib(ring, ib);
	WARN_ON(ib->length_dw > ndw);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->page_directory, f, true);
	if (fence) {
		fence_put(*fence);
		*fence = fence_get(f);
	}
	fence_put(f);
	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @gtt: GART instance to use for mapping
 * @gtt_flags: flags as they are used for GTT
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct amdgpu_gart *gtt,
				      uint32_t gtt_flags,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint64_t addr, struct fence **fence)
{
	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;

	uint64_t start = mapping->it.start;
	uint32_t flags = gtt_flags;
	int r;

	/* normally bo_va->flags only contains the READABLE and WRITEABLE
	 * bits, but to be safe we filter the flags here first
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	trace_amdgpu_vm_bo_update(mapping);

	addr += mapping->offset;

	if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
		return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
						   start, mapping->it.last,
						   flags, addr, fence);

	while (start != mapping->it.last + 1) {
		uint64_t last;

		last = min((uint64_t)mapping->it.last, start + max_size);
		r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
						start, last, flags, addr,
						fence);
		if (r)
			return r;

		start = last + 1;
		addr += max_size;
	}

	return 0;
}
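
/*
 * For example (illustrative): with 4 KiB GPU pages, max_size above is
 * 64 MiB worth of entries, so a 150 MiB system-memory mapping is split
 * into chunks of roughly 64 MiB, 64 MiB and 22 MiB, each small enough
 * to fit into a single SDMA indirect buffer.
 */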

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and the mutex must be locked!
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_gart *gtt = NULL;
	uint32_t flags;
	uint64_t addr;
	int r;

	if (mem) {
		addr = (u64)mem->start << PAGE_SHIFT;
		switch (mem->mem_type) {
		case TTM_PL_TT:
			gtt = &bo_va->bo->adev->gart;
			break;

		case TTM_PL_VRAM:
			addr += adev->vm_manager.vram_base_offset;
			break;

		default:
			break;
		}
	} else {
		addr = 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
					       &bo_va->last_pt_update);
		if (r)
			return r;
	}

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);

		list_for_each_entry(mapping, &bo_va->invalids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);
	if (!mem)
		list_add(&bo_va->vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	spin_lock(&vm->freed_lock);
	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
					   struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);
		spin_unlock(&vm->freed_lock);
		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
					       0, NULL);
		kfree(mapping);
		if (r)
			return r;

		spin_lock(&vm->freed_lock);
	}
	spin_unlock(&vm->freed_lock);

	return 0;
}

/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add the last page table update fence to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
					 struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);
		mutex_lock(&bo_va->mutex);
		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
		mutex_unlock(&bo_va->mutex);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);
	INIT_LIST_HEAD(&bo_va->vm_status);
	mutex_init(&bo_va->mutex);
	list_add_tail(&bo_va->bo_list, &bo->va);

	return bo_va;
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint32_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	struct interval_tree_node *it;
	unsigned last_pfn, pt_idx;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fits at this offset */
	eaddr = saddr + size - 1;
	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
		return -EINVAL;

	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	spin_lock(&vm->it_lock);
	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
	spin_unlock(&vm->it_lock);
	if (it) {
		struct amdgpu_bo_va_mapping *tmp;
		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
			tmp->it.start, tmp->it.last + 1);
		r = -EINVAL;
		goto error;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		r = -ENOMEM;
		goto error;
	}

	INIT_LIST_HEAD(&mapping->list);
	mapping->it.start = saddr;
	mapping->it.last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	mutex_lock(&bo_va->mutex);
	list_add(&mapping->list, &bo_va->invalids);
	mutex_unlock(&bo_va->mutex);
	spin_lock(&vm->it_lock);
	interval_tree_insert(&mapping->it, &vm->va);
	spin_unlock(&vm->it_lock);
	trace_amdgpu_vm_bo_map(bo_va, mapping);

	/* Make sure the page tables are allocated */
	saddr >>= amdgpu_vm_block_size;
	eaddr >>= amdgpu_vm_block_size;

	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

	if (eaddr > vm->max_pde_used)
		vm->max_pde_used = eaddr;

	/* walk over the address space and allocate the page tables */
	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
		struct reservation_object *resv = vm->page_directory->tbo.resv;
		struct amdgpu_bo_list_entry *entry;
		struct amdgpu_bo *pt;

		entry = &vm->page_tables[pt_idx].entry;
		if (entry->robj)
			continue;

		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
				     NULL, resv, &pt);
		if (r)
			goto error_free;

		/* Keep a reference to the page table to avoid freeing
		 * them up in the wrong order.
		 */
		pt->parent = amdgpu_bo_ref(vm->page_directory);

		r = amdgpu_vm_clear_bo(adev, vm, pt);
		if (r) {
			amdgpu_bo_unref(&pt);
			goto error_free;
		}

		entry->robj = pt;
		entry->priority = 0;
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = true;
		vm->page_tables[pt_idx].addr = 0;
	}

	return 0;

error_free:
	list_del(&mapping->list);
	spin_lock(&vm->it_lock);
	interval_tree_remove(&mapping->it, &vm->va);
	spin_unlock(&vm->it_lock);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
	kfree(mapping);

error:
	return r;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	mutex_lock(&bo_va->mutex);
	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->it.start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->it.start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids) {
			mutex_unlock(&bo_va->mutex);
			return -ENOENT;
		}
	}
	mutex_unlock(&bo_va->mutex);
	list_del(&mapping->list);
	spin_lock(&vm->it_lock);
	interval_tree_remove(&mapping->it, &vm->va);
	spin_unlock(&vm->it_lock);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid) {
		spin_lock(&vm->freed_lock);
		list_add(&mapping->list, &vm->freed);
		spin_unlock(&vm->freed_lock);
	} else {
		kfree(mapping);
	}

	return 0;
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		spin_lock(&vm->it_lock);
		interval_tree_remove(&mapping->it, &vm->va);
		spin_unlock(&vm->it_lock);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		spin_lock(&vm->freed_lock);
		list_add(&mapping->list, &vm->freed);
		spin_unlock(&vm->freed_lock);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		spin_lock(&vm->it_lock);
		interval_tree_remove(&mapping->it, &vm->va);
		spin_unlock(&vm->it_lock);
		kfree(mapping);
	}
	fence_put(bo_va->last_pt_update);
	mutex_destroy(&bo_va->mutex);
	kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
				   AMDGPU_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries;
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		vm->ids[i].mgr_id = NULL;
		vm->ids[i].flushed_updates = NULL;
	}
	vm->va = RB_ROOT;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);
	spin_lock_init(&vm->it_lock);
	spin_lock_init(&vm->freed_lock);
	pd_size = amdgpu_vm_directory_size(adev);
	pd_entries = amdgpu_vm_num_pdes(adev);

	/* allocate page table array */
	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	/* create scheduler entity for page table updates */

	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs);
	if (r)
		return r;

	vm->page_directory_fence = NULL;

	r = amdgpu_bo_create(adev, pd_size, align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
			     NULL, NULL, &vm->page_directory);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(vm->page_directory, false);
	if (r)
		goto error_free_page_directory;

	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
	amdgpu_bo_unreserve(vm->page_directory);
	if (r)
		goto error_free_page_directory;

	return 0;

error_free_page_directory:
	amdgpu_bo_unref(&vm->page_directory);
	vm->page_directory = NULL;

error_free_sched_entity:
	amd_sched_entity_fini(&ring->sched, &vm->entity);

	return r;
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	int i;

	amd_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		list_del(&mapping->list);
		kfree(mapping);
	}

	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
		amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
	drm_free_large(vm->page_tables);

	amdgpu_bo_unref(&vm->page_directory);
	fence_put(vm->page_directory_fence);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_vm_id *id = &vm->ids[i];

		if (id->mgr_id)
			atomic_long_cmpxchg(&id->mgr_id->owner,
					    (long)id, 0);
		fence_put(id->flushed_updates);
	}
}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures.
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < adev->vm_manager.num_ids; ++i)
		list_add_tail(&adev->vm_manager.ids[i].list,
			      &adev->vm_manager.ids_lru);

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_VM; ++i)
		fence_put(adev->vm_manager.ids[i].active);
}