Commit | Line | Data |
---|---|---|
d38ceaf9 AD |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. | |
3 | * Copyright 2008 Red Hat Inc. | |
4 | * Copyright 2009 Jerome Glisse. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the "Software"), | |
8 | * to deal in the Software without restriction, including without limitation | |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
10 | * and/or sell copies of the Software, and to permit persons to whom the | |
11 | * Software is furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
22 | * OTHER DEALINGS IN THE SOFTWARE. | |
23 | * | |
24 | * Authors: Dave Airlie | |
25 | * Alex Deucher | |
26 | * Jerome Glisse | |
27 | */ | |
1fbb2e92 | 28 | #include <linux/fence-array.h> |
d38ceaf9 AD |
29 | #include <drm/drmP.h> |
30 | #include <drm/amdgpu_drm.h> | |
31 | #include "amdgpu.h" | |
32 | #include "amdgpu_trace.h" | |
33 | ||
34 | /* | |
35 | * GPUVM | |
36 | * GPUVM is similar to the legacy gart on older asics; however, |
37 | * rather than there being a single global gart table | |
38 | * for the entire GPU, there are multiple VM page tables active | |
39 | * at any given time. The VM page tables can contain a mix of |
40 | * vram pages and system memory pages, and system memory pages |
41 | * can be mapped as snooped (cached system pages) or unsnooped | |
42 | * (uncached system pages). | |
43 | * Each VM has an ID associated with it and there is a page table | |
44 | * associated with each VMID. When executing a command buffer, |
45 | * the kernel tells the ring what VMID to use for that command |
46 | * buffer. VMIDs are allocated dynamically as commands are submitted. | |
47 | * The userspace drivers maintain their own address space and the kernel | |
48 | * sets up their page tables accordingly when they submit their |
49 | * command buffers and a VMID is assigned. | |
50 | * Cayman/Trinity support up to 8 active VMs at any given time; | |
51 | * SI supports 16. | |
52 | */ | |
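/*
 * Rough lifecycle sketch (illustrative only, not the actual call sites,
 * which live in the CS/job submission paths): a command submission
 * roughly ends up doing
 *
 *   amdgpu_vm_grab_id(vm, ring, &sync, fence, &vm_id, &pd_addr);
 *   amdgpu_vm_flush(ring, vm_id, pd_addr, gds_base, gds_size,
 *                   gws_base, gws_size, oa_base, oa_size);
 *   ... execute the user command buffer using vm_id ...
 *
 * with amdgpu_vm_update_page_directory() and amdgpu_vm_bo_update()
 * keeping the page tables valid before the submission.
 */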
53 | ||
4ff37a83 CK |
54 | /* Special value indicating that no flush is necessary */ |
55 | #define AMDGPU_VM_NO_FLUSH (~0ll) | |
56 | ||
f4833c4f HK |
57 | /* Local structure. Encapsulate some VM table update parameters to reduce |
58 | * the number of function parameters | |
59 | */ | |
60 | struct amdgpu_vm_update_params { | |
61 | /* address where to copy page table entries from */ | |
62 | uint64_t src; | |
63 | /* DMA addresses to use for mapping */ | |
64 | dma_addr_t *pages_addr; | |
65 | /* indirect buffer to fill with commands */ | |
66 | struct amdgpu_ib *ib; | |
67 | }; | |
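/*
 * Note (summary of amdgpu_vm_update_pages() below): when src is set the
 * entries are copied from there with amdgpu_vm_copy_pte(); when only
 * pages_addr is set they are written with amdgpu_vm_write_pte(); otherwise
 * they are generated directly (amdgpu_vm_write_pte() for very small counts,
 * amdgpu_vm_set_pte_pde() for larger runs).
 */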
68 | ||
d38ceaf9 AD |
69 | /** |
70 | * amdgpu_vm_num_pdes - return the number of page directory entries |
71 | * | |
72 | * @adev: amdgpu_device pointer | |
73 | * | |
8843dbbb | 74 | * Calculate the number of page directory entries. |
d38ceaf9 AD |
75 | */ |
76 | static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev) | |
77 | { | |
78 | return adev->vm_manager.max_pfn >> amdgpu_vm_block_size; | |
79 | } | |
80 | ||
81 | /** | |
82 | * amdgpu_vm_directory_size - returns the size of the page directory in bytes | |
83 | * | |
84 | * @adev: amdgpu_device pointer | |
85 | * | |
8843dbbb | 86 | * Calculate the size of the page directory in bytes. |
d38ceaf9 AD |
87 | */ |
88 | static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) | |
89 | { | |
90 | return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8); | |
91 | } | |
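/*
 * Worked example (values assumed for illustration only): with
 * max_pfn = 0x100000 (4 GiB of 4 KiB pages) and amdgpu_vm_block_size = 9
 * (512 PTEs per page table), amdgpu_vm_num_pdes() returns
 * 0x100000 >> 9 = 2048 PDEs, and amdgpu_vm_directory_size() is
 * 2048 * 8 bytes = 16 KiB, GPU page aligned.
 */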
92 | ||
93 | /** | |
56467ebf | 94 | * amdgpu_vm_get_pd_bo - add the VM PD to a validation list |
d38ceaf9 AD |
95 | * |
96 | * @vm: vm providing the BOs | |
3c0eea6c | 97 | * @validated: head of validation list |
56467ebf | 98 | * @entry: entry to add |
d38ceaf9 AD |
99 | * |
100 | * Add the page directory to the list of BOs to | |
56467ebf | 101 | * validate for command submission. |
d38ceaf9 | 102 | */ |
56467ebf CK |
103 | void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, |
104 | struct list_head *validated, | |
105 | struct amdgpu_bo_list_entry *entry) | |
d38ceaf9 | 106 | { |
56467ebf | 107 | entry->robj = vm->page_directory; |
56467ebf CK |
108 | entry->priority = 0; |
109 | entry->tv.bo = &vm->page_directory->tbo; | |
110 | entry->tv.shared = true; | |
2f568dbd | 111 | entry->user_pages = NULL; |
56467ebf CK |
112 | list_add(&entry->tv.head, validated); |
113 | } | |
d38ceaf9 | 114 | |
56467ebf | 115 | /** |
ee1782c3 | 116 | * amdgpu_vm_get_pt_bos - add the vm PT BOs to a duplicates list |
56467ebf CK |
117 | * |
118 | * @vm: vm providing the BOs | |
3c0eea6c | 119 | * @duplicates: head of duplicates list |
d38ceaf9 | 120 | * |
ee1782c3 CK |
121 | * Add the page tables to the BO duplicates list |
122 | * for command submission. | |
d38ceaf9 | 123 | */ |
ee1782c3 | 124 | void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates) |
d38ceaf9 | 125 | { |
ee1782c3 | 126 | unsigned i; |
d38ceaf9 AD |
127 | |
128 | /* add the vm page tables to the list */ |
ee1782c3 CK |
129 | for (i = 0; i <= vm->max_pde_used; ++i) { |
130 | struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; | |
131 | ||
132 | if (!entry->robj) | |
d38ceaf9 AD |
133 | continue; |
134 | ||
ee1782c3 | 135 | list_add(&entry->tv.head, duplicates); |
d38ceaf9 | 136 | } |
eceb8a15 CK |
137 | |
138 | } | |
139 | ||
140 | /** | |
141 | * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail | |
142 | * | |
143 | * @adev: amdgpu device instance | |
144 | * @vm: vm providing the BOs | |
145 | * | |
146 | * Move the PT BOs to the tail of the LRU. | |
147 | */ | |
148 | void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, | |
149 | struct amdgpu_vm *vm) | |
150 | { | |
151 | struct ttm_bo_global *glob = adev->mman.bdev.glob; | |
152 | unsigned i; | |
153 | ||
154 | spin_lock(&glob->lru_lock); | |
155 | for (i = 0; i <= vm->max_pde_used; ++i) { | |
156 | struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; | |
157 | ||
158 | if (!entry->robj) | |
159 | continue; | |
160 | ||
161 | ttm_bo_move_to_lru_tail(&entry->robj->tbo); | |
162 | } | |
163 | spin_unlock(&glob->lru_lock); | |
d38ceaf9 AD |
164 | } |
165 | ||
166 | /** | |
167 | * amdgpu_vm_grab_id - allocate the next free VMID | |
168 | * | |
d38ceaf9 | 169 | * @vm: vm to allocate id for |
7f8a5290 CK |
170 | * @ring: ring we want to submit job to |
171 | * @sync: sync object where we add dependencies | |
94dd0a4a | 172 | * @fence: fence protecting ID from reuse |
d38ceaf9 | 173 | * |
7f8a5290 | 174 | * Allocate an id for the vm, adding fences to the sync obj as necessary. |
d38ceaf9 | 175 | */ |
7f8a5290 | 176 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, |
4ff37a83 CK |
177 | struct amdgpu_sync *sync, struct fence *fence, |
178 | unsigned *vm_id, uint64_t *vm_pd_addr) | |
d38ceaf9 | 179 | { |
4ff37a83 | 180 | uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); |
d38ceaf9 | 181 | struct amdgpu_device *adev = ring->adev; |
4ff37a83 | 182 | struct fence *updates = sync->last_vm_update; |
8d76001e | 183 | struct amdgpu_vm_id *id, *idle; |
1fbb2e92 CK |
184 | struct fence **fences; |
185 | unsigned i; | |
186 | int r = 0; | |
187 | ||
188 | fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids, | |
189 | GFP_KERNEL); | |
190 | if (!fences) | |
191 | return -ENOMEM; | |
d38ceaf9 | 192 | |
94dd0a4a CK |
193 | mutex_lock(&adev->vm_manager.lock); |
194 | ||
36fd7c5c | 195 | /* Check if we have an idle VMID */ |
1fbb2e92 | 196 | i = 0; |
8d76001e | 197 | list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) { |
1fbb2e92 CK |
198 | fences[i] = amdgpu_sync_peek_fence(&idle->active, ring); |
199 | if (!fences[i]) | |
36fd7c5c | 200 | break; |
1fbb2e92 | 201 | ++i; |
36fd7c5c CK |
202 | } |
203 | ||
1fbb2e92 | 204 | /* If we can't find an idle VMID to use, wait till one becomes available */ |
8d76001e | 205 | if (&idle->list == &adev->vm_manager.ids_lru) { |
1fbb2e92 CK |
206 | u64 fence_context = adev->vm_manager.fence_context + ring->idx; |
207 | unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; | |
208 | struct fence_array *array; | |
209 | unsigned j; | |
210 | ||
211 | for (j = 0; j < i; ++j) | |
212 | fence_get(fences[j]); | |
213 | ||
214 | array = fence_array_create(i, fences, fence_context, | |
215 | seqno, true); | |
216 | if (!array) { | |
217 | for (j = 0; j < i; ++j) | |
218 | fence_put(fences[j]); | |
219 | kfree(fences); | |
220 | r = -ENOMEM; | |
221 | goto error; | |
222 | } | |
223 | ||
224 | ||
225 | r = amdgpu_sync_fence(ring->adev, sync, &array->base); | |
226 | fence_put(&array->base); | |
227 | if (r) | |
228 | goto error; | |
229 | ||
230 | mutex_unlock(&adev->vm_manager.lock); | |
231 | return 0; | |
232 | ||
233 | } | |
234 | kfree(fences); | |
235 | ||
236 | /* Check if we can use a VMID already assigned to this VM */ | |
237 | i = ring->idx; | |
238 | do { | |
239 | struct fence *flushed; | |
3dab83be | 240 | bool same_ring = ring->idx == i; |
1fbb2e92 CK |
241 | |
242 | id = vm->ids[i++]; | |
243 | if (i == AMDGPU_MAX_RINGS) | |
244 | i = 0; | |
8d76001e | 245 | |
1fbb2e92 CK |
246 | /* Check all the prerequisites to using this VMID */ |
247 | if (!id) | |
248 | continue; | |
249 | ||
250 | if (atomic64_read(&id->owner) != vm->client_id) | |
251 | continue; | |
252 | ||
253 | if (pd_addr != id->pd_gpu_addr) | |
254 | continue; | |
255 | ||
3dab83be | 256 | if (!same_ring && |
1fbb2e92 CK |
257 | (!id->last_flush || !fence_is_signaled(id->last_flush))) |
258 | continue; | |
259 | ||
260 | flushed = id->flushed_updates; | |
261 | if (updates && | |
262 | (!flushed || fence_is_later(updates, flushed))) | |
263 | continue; | |
264 | ||
3dab83be CK |
265 | /* Good, we can use this VMID. Remember this submission as |
266 | * user of the VMID. | |
267 | */ | |
1fbb2e92 CK |
268 | r = amdgpu_sync_fence(ring->adev, &id->active, fence); |
269 | if (r) | |
270 | goto error; | |
8d76001e | 271 | |
1fbb2e92 CK |
272 | list_move_tail(&id->list, &adev->vm_manager.ids_lru); |
273 | vm->ids[ring->idx] = id; | |
8d76001e | 274 | |
1fbb2e92 CK |
275 | *vm_id = id - adev->vm_manager.ids; |
276 | *vm_pd_addr = AMDGPU_VM_NO_FLUSH; | |
277 | trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr); | |
8d76001e | 278 | |
1fbb2e92 CK |
279 | mutex_unlock(&adev->vm_manager.lock); |
280 | return 0; | |
8d76001e | 281 | |
1fbb2e92 | 282 | } while (i != ring->idx); |
8d76001e | 283 | |
1fbb2e92 CK |
284 | /* Still no ID to use? Then use the idle one found earlier */ |
285 | id = idle; | |
8e9fbeb5 | 286 | |
1fbb2e92 CK |
287 | /* Remember this submission as user of the VMID */ |
288 | r = amdgpu_sync_fence(ring->adev, &id->active, fence); | |
832a902f CK |
289 | if (r) |
290 | goto error; | |
94dd0a4a | 291 | |
832a902f CK |
292 | fence_put(id->first); |
293 | id->first = fence_get(fence); | |
94dd0a4a | 294 | |
41d9eb2c CK |
295 | fence_put(id->last_flush); |
296 | id->last_flush = NULL; | |
297 | ||
832a902f CK |
298 | fence_put(id->flushed_updates); |
299 | id->flushed_updates = fence_get(updates); | |
94dd0a4a | 300 | |
832a902f | 301 | id->pd_gpu_addr = pd_addr; |
4ff37a83 | 302 | |
832a902f | 303 | list_move_tail(&id->list, &adev->vm_manager.ids_lru); |
0ea54b9b | 304 | atomic64_set(&id->owner, vm->client_id); |
832a902f | 305 | vm->ids[ring->idx] = id; |
d38ceaf9 | 306 | |
832a902f CK |
307 | *vm_id = id - adev->vm_manager.ids; |
308 | *vm_pd_addr = pd_addr; | |
309 | trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr); | |
310 | ||
311 | error: | |
94dd0a4a | 312 | mutex_unlock(&adev->vm_manager.lock); |
a9a78b32 | 313 | return r; |
d38ceaf9 AD |
314 | } |
315 | ||
316 | /** | |
317 | * amdgpu_vm_flush - hardware flush the vm | |
318 | * | |
319 | * @ring: ring to use for flush | |
cffadc83 | 320 | * @vm_id: vmid number to use |
4ff37a83 | 321 | * @pd_addr: address of the page directory |
d38ceaf9 | 322 | * |
4ff37a83 | 323 | * Emit a VM flush when it is necessary. |
d38ceaf9 | 324 | */ |
41d9eb2c CK |
325 | int amdgpu_vm_flush(struct amdgpu_ring *ring, |
326 | unsigned vm_id, uint64_t pd_addr, | |
327 | uint32_t gds_base, uint32_t gds_size, | |
328 | uint32_t gws_base, uint32_t gws_size, | |
329 | uint32_t oa_base, uint32_t oa_size) | |
d38ceaf9 | 330 | { |
971fe9a9 | 331 | struct amdgpu_device *adev = ring->adev; |
bcb1ba35 | 332 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; |
d564a06e | 333 | bool gds_switch_needed = ring->funcs->emit_gds_switch && ( |
bcb1ba35 CK |
334 | id->gds_base != gds_base || |
335 | id->gds_size != gds_size || | |
336 | id->gws_base != gws_base || | |
337 | id->gws_size != gws_size || | |
338 | id->oa_base != oa_base || | |
339 | id->oa_size != oa_size); | |
41d9eb2c | 340 | int r; |
d564a06e CK |
341 | |
342 | if (ring->funcs->emit_pipeline_sync && ( | |
fe707664 CZ |
343 | pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || |
344 | ring->type == AMDGPU_RING_TYPE_COMPUTE)) | |
d564a06e | 345 | amdgpu_ring_emit_pipeline_sync(ring); |
971fe9a9 | 346 | |
c5637837 ML |
347 | if (ring->funcs->emit_vm_flush && |
348 | pd_addr != AMDGPU_VM_NO_FLUSH) { | |
41d9eb2c CK |
349 | struct fence *fence; |
350 | ||
cffadc83 CK |
351 | trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id); |
352 | amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr); | |
41d9eb2c | 353 | |
3dab83be CK |
354 | r = amdgpu_fence_emit(ring, &fence); |
355 | if (r) | |
356 | return r; | |
357 | ||
41d9eb2c | 358 | mutex_lock(&adev->vm_manager.lock); |
3dab83be CK |
359 | fence_put(id->last_flush); |
360 | id->last_flush = fence; | |
41d9eb2c | 361 | mutex_unlock(&adev->vm_manager.lock); |
d38ceaf9 | 362 | } |
cffadc83 | 363 | |
d564a06e | 364 | if (gds_switch_needed) { |
bcb1ba35 CK |
365 | id->gds_base = gds_base; |
366 | id->gds_size = gds_size; | |
367 | id->gws_base = gws_base; | |
368 | id->gws_size = gws_size; | |
369 | id->oa_base = oa_base; | |
370 | id->oa_size = oa_size; | |
cffadc83 CK |
371 | amdgpu_ring_emit_gds_switch(ring, vm_id, |
372 | gds_base, gds_size, | |
373 | gws_base, gws_size, | |
374 | oa_base, oa_size); | |
971fe9a9 | 375 | } |
41d9eb2c CK |
376 | |
377 | return 0; | |
971fe9a9 CK |
378 | } |
379 | ||
380 | /** | |
381 | * amdgpu_vm_reset_id - reset VMID to zero | |
382 | * | |
383 | * @adev: amdgpu device structure | |
384 | * @vm_id: vmid number to use | |
385 | * | |
386 | * Reset saved GDS, GWS and OA to force switch on next flush. |
387 | */ | |
388 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id) | |
389 | { | |
bcb1ba35 CK |
390 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; |
391 | ||
392 | id->gds_base = 0; | |
393 | id->gds_size = 0; | |
394 | id->gws_base = 0; | |
395 | id->gws_size = 0; | |
396 | id->oa_base = 0; | |
397 | id->oa_size = 0; | |
d38ceaf9 AD |
398 | } |
399 | ||
d38ceaf9 AD |
400 | /** |
401 | * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo | |
402 | * | |
403 | * @vm: requested vm | |
404 | * @bo: requested buffer object | |
405 | * | |
8843dbbb | 406 | * Find @bo inside the requested vm. |
d38ceaf9 AD |
407 | * Search inside the @bo's vm list for the requested vm |
408 | * Returns the found bo_va or NULL if none is found | |
409 | * | |
410 | * Object has to be reserved! | |
411 | */ | |
412 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | |
413 | struct amdgpu_bo *bo) | |
414 | { | |
415 | struct amdgpu_bo_va *bo_va; | |
416 | ||
417 | list_for_each_entry(bo_va, &bo->va, bo_list) { | |
418 | if (bo_va->vm == vm) { | |
419 | return bo_va; | |
420 | } | |
421 | } | |
422 | return NULL; | |
423 | } | |
424 | ||
425 | /** | |
426 | * amdgpu_vm_update_pages - helper to call the right asic function | |
427 | * | |
428 | * @adev: amdgpu_device pointer | |
f4833c4f | 429 | * @vm_update_params: see amdgpu_vm_update_params definition |
d38ceaf9 AD |
430 | * @pe: addr of the page entry |
431 | * @addr: dst addr to write into pe | |
432 | * @count: number of page entries to update | |
433 | * @incr: increase next addr by incr bytes | |
434 | * @flags: hw access flags | |
d38ceaf9 AD |
435 | * |
436 | * Traces the parameters and calls the right asic functions | |
437 | * to setup the page table using the DMA. | |
438 | */ | |
439 | static void amdgpu_vm_update_pages(struct amdgpu_device *adev, | |
f4833c4f HK |
440 | struct amdgpu_vm_update_params |
441 | *vm_update_params, | |
d38ceaf9 AD |
442 | uint64_t pe, uint64_t addr, |
443 | unsigned count, uint32_t incr, | |
9ab21462 | 444 | uint32_t flags) |
d38ceaf9 AD |
445 | { |
446 | trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); | |
447 | ||
f4833c4f HK |
448 | if (vm_update_params->src) { |
449 | amdgpu_vm_copy_pte(adev, vm_update_params->ib, | |
450 | pe, (vm_update_params->src + (addr >> 12) * 8), count); | |
d38ceaf9 | 451 | |
f4833c4f HK |
452 | } else if (vm_update_params->pages_addr) { |
453 | amdgpu_vm_write_pte(adev, vm_update_params->ib, | |
454 | vm_update_params->pages_addr, | |
455 | pe, addr, count, incr, flags); | |
b07c9d2a CK |
456 | |
457 | } else if (count < 3) { | |
f4833c4f | 458 | amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr, |
b07c9d2a | 459 | count, incr, flags); |
d38ceaf9 AD |
460 | |
461 | } else { | |
f4833c4f | 462 | amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr, |
d38ceaf9 AD |
463 | count, incr, flags); |
464 | } | |
465 | } | |
466 | ||
467 | /** | |
468 | * amdgpu_vm_clear_bo - initially clear the page dir/table | |
469 | * | |
470 | * @adev: amdgpu_device pointer | |
471 | * @bo: bo to clear | |
ef9f0a83 CZ |
472 | * |
473 | * Need to reserve the bo first before calling it. |
d38ceaf9 AD |
474 | */ |
475 | static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |
2bd9ccfa | 476 | struct amdgpu_vm *vm, |
d38ceaf9 AD |
477 | struct amdgpu_bo *bo) |
478 | { | |
2d55e45a | 479 | struct amdgpu_ring *ring; |
4af9f07c | 480 | struct fence *fence = NULL; |
d71518b5 | 481 | struct amdgpu_job *job; |
f4833c4f | 482 | struct amdgpu_vm_update_params vm_update_params; |
d38ceaf9 AD |
483 | unsigned entries; |
484 | uint64_t addr; | |
485 | int r; | |
486 | ||
f4833c4f | 487 | memset(&vm_update_params, 0, sizeof(vm_update_params)); |
2d55e45a CK |
488 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); |
489 | ||
ca952613 | 490 | r = reservation_object_reserve_shared(bo->tbo.resv); |
491 | if (r) | |
492 | return r; | |
493 | ||
d38ceaf9 AD |
494 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); |
495 | if (r) | |
ef9f0a83 | 496 | goto error; |
d38ceaf9 AD |
497 | |
498 | addr = amdgpu_bo_gpu_offset(bo); | |
499 | entries = amdgpu_bo_size(bo) / 8; | |
500 | ||
d71518b5 CK |
501 | r = amdgpu_job_alloc_with_ib(adev, 64, &job); |
502 | if (r) | |
ef9f0a83 | 503 | goto error; |
d38ceaf9 | 504 | |
f4833c4f HK |
505 | vm_update_params.ib = &job->ibs[0]; |
506 | amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries, | |
d71518b5 CK |
507 | 0, 0); |
508 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); | |
509 | ||
510 | WARN_ON(job->ibs[0].length_dw > 64); | |
2bd9ccfa CK |
511 | r = amdgpu_job_submit(job, ring, &vm->entity, |
512 | AMDGPU_FENCE_OWNER_VM, &fence); | |
d38ceaf9 AD |
513 | if (r) |
514 | goto error_free; | |
515 | ||
d71518b5 | 516 | amdgpu_bo_fence(bo, fence, true); |
281b4223 | 517 | fence_put(fence); |
cadf97b1 | 518 | return 0; |
ef9f0a83 | 519 | |
d38ceaf9 | 520 | error_free: |
d71518b5 | 521 | amdgpu_job_free(job); |
d38ceaf9 | 522 | |
ef9f0a83 | 523 | error: |
d38ceaf9 AD |
524 | return r; |
525 | } | |
526 | ||
527 | /** | |
b07c9d2a | 528 | * amdgpu_vm_map_gart - Resolve gart mapping of addr |
d38ceaf9 | 529 | * |
b07c9d2a | 530 | * @pages_addr: optional DMA address to use for lookup |
d38ceaf9 AD |
531 | * @addr: the unmapped addr |
532 | * | |
533 | * Look up the physical address of the page that the pte resolves | |
b07c9d2a | 534 | * to and return the pointer for the page table entry. |
d38ceaf9 | 535 | */ |
b07c9d2a | 536 | uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) |
d38ceaf9 AD |
537 | { |
538 | uint64_t result; | |
539 | ||
b07c9d2a CK |
540 | if (pages_addr) { |
541 | /* page table offset */ | |
542 | result = pages_addr[addr >> PAGE_SHIFT]; | |
543 | ||
544 | /* in case cpu page size != gpu page size*/ | |
545 | result |= addr & (~PAGE_MASK); | |
546 | ||
547 | } else { | |
548 | /* No mapping required */ | |
549 | result = addr; | |
550 | } | |
d38ceaf9 | 551 | |
b07c9d2a | 552 | result &= 0xFFFFFFFFFFFFF000ULL; |
d38ceaf9 AD |
553 | |
554 | return result; | |
555 | } | |
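/*
 * Example (hypothetical values, assuming 4 KiB CPU pages): for
 * addr = 0x12345678 with a GTT-backed BO, pages_addr[0x12345678 >> PAGE_SHIFT]
 * = pages_addr[0x12345] supplies the DMA address of the backing page; the
 * in-page offset is OR'ed back in to cope with CPU page sizes larger than
 * the GPU page size, and the final mask drops the low 12 bits again so a
 * GPU-page aligned address is returned.
 */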
556 | ||
557 | /** | |
557 | * amdgpu_vm_update_page_directory - make sure that page directory is valid |
559 | * | |
560 | * @adev: amdgpu_device pointer | |
561 | * @vm: requested vm | |
564 | * | |
565 | * Allocates new page tables if necessary | |
8843dbbb | 566 | * and updates the page directory. |
d38ceaf9 | 567 | * Returns 0 for success, error for failure. |
d38ceaf9 AD |
568 | */ |
569 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | |
570 | struct amdgpu_vm *vm) | |
571 | { | |
2d55e45a | 572 | struct amdgpu_ring *ring; |
d38ceaf9 AD |
573 | struct amdgpu_bo *pd = vm->page_directory; |
574 | uint64_t pd_addr = amdgpu_bo_gpu_offset(pd); | |
575 | uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; | |
576 | uint64_t last_pde = ~0, last_pt = ~0; | |
577 | unsigned count = 0, pt_idx, ndw; | |
d71518b5 | 578 | struct amdgpu_job *job; |
f4833c4f | 579 | struct amdgpu_vm_update_params vm_update_params; |
4af9f07c | 580 | struct fence *fence = NULL; |
d5fc5e82 | 581 | |
d38ceaf9 AD |
582 | int r; |
583 | ||
f4833c4f | 584 | memset(&vm_update_params, 0, sizeof(vm_update_params)); |
2d55e45a CK |
585 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); |
586 | ||
d38ceaf9 AD |
587 | /* padding, etc. */ |
588 | ndw = 64; | |
589 | ||
590 | /* assume the worst case */ | |
591 | ndw += vm->max_pde_used * 6; | |
592 | ||
d71518b5 CK |
593 | r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); |
594 | if (r) | |
d38ceaf9 | 595 | return r; |
d71518b5 | 596 | |
f4833c4f | 597 | vm_update_params.ib = &job->ibs[0]; |
d38ceaf9 AD |
598 | |
599 | /* walk over the address space and update the page directory */ | |
600 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { | |
ee1782c3 | 601 | struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj; |
d38ceaf9 AD |
602 | uint64_t pde, pt; |
603 | ||
604 | if (bo == NULL) | |
605 | continue; | |
606 | ||
607 | pt = amdgpu_bo_gpu_offset(bo); | |
608 | if (vm->page_tables[pt_idx].addr == pt) | |
609 | continue; | |
610 | vm->page_tables[pt_idx].addr = pt; | |
611 | ||
612 | pde = pd_addr + pt_idx * 8; | |
613 | if (((last_pde + 8 * count) != pde) || | |
614 | ((last_pt + incr * count) != pt)) { | |
615 | ||
616 | if (count) { | |
f4833c4f | 617 | amdgpu_vm_update_pages(adev, &vm_update_params, |
9ab21462 CK |
618 | last_pde, last_pt, |
619 | count, incr, | |
620 | AMDGPU_PTE_VALID); | |
d38ceaf9 AD |
621 | } |
622 | ||
623 | count = 1; | |
624 | last_pde = pde; | |
625 | last_pt = pt; | |
626 | } else { | |
627 | ++count; | |
628 | } | |
629 | } | |
630 | ||
631 | if (count) | |
f4833c4f HK |
632 | amdgpu_vm_update_pages(adev, &vm_update_params, |
633 | last_pde, last_pt, | |
634 | count, incr, AMDGPU_PTE_VALID); | |
d38ceaf9 | 635 | |
f4833c4f HK |
636 | if (vm_update_params.ib->length_dw != 0) { |
637 | amdgpu_ring_pad_ib(ring, vm_update_params.ib); | |
e86f9cee CK |
638 | amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, |
639 | AMDGPU_FENCE_OWNER_VM); | |
f4833c4f | 640 | WARN_ON(vm_update_params.ib->length_dw > ndw); |
2bd9ccfa CK |
641 | r = amdgpu_job_submit(job, ring, &vm->entity, |
642 | AMDGPU_FENCE_OWNER_VM, &fence); | |
4af9f07c CZ |
643 | if (r) |
644 | goto error_free; | |
05906dec | 645 | |
4af9f07c | 646 | amdgpu_bo_fence(pd, fence, true); |
05906dec BN |
647 | fence_put(vm->page_directory_fence); |
648 | vm->page_directory_fence = fence_get(fence); | |
281b4223 | 649 | fence_put(fence); |
d5fc5e82 | 650 | |
d71518b5 CK |
651 | } else { |
652 | amdgpu_job_free(job); | |
d5fc5e82 | 653 | } |
d38ceaf9 AD |
654 | |
655 | return 0; | |
d5fc5e82 CZ |
656 | |
657 | error_free: | |
d71518b5 | 658 | amdgpu_job_free(job); |
4af9f07c | 659 | return r; |
d38ceaf9 AD |
660 | } |
661 | ||
662 | /** | |
663 | * amdgpu_vm_frag_ptes - add fragment information to PTEs | |
664 | * | |
665 | * @adev: amdgpu_device pointer | |
f4833c4f | 666 | * @vm_update_params: see amdgpu_vm_update_params definition |
d38ceaf9 AD |
667 | * @pe_start: first PTE to handle |
668 | * @pe_end: last PTE to handle | |
669 | * @addr: addr those PTEs should point to | |
670 | * @flags: hw mapping flags | |
d38ceaf9 AD |
671 | */ |
672 | static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, | |
f4833c4f HK |
673 | struct amdgpu_vm_update_params |
674 | *vm_update_params, | |
d38ceaf9 | 675 | uint64_t pe_start, uint64_t pe_end, |
9ab21462 | 676 | uint64_t addr, uint32_t flags) |
d38ceaf9 AD |
677 | { |
678 | /** | |
679 | * The MC L1 TLB supports variable sized pages, based on a fragment | |
680 | * field in the PTE. When this field is set to a non-zero value, page | |
681 | * granularity is increased from 4KB to (1 << (12 + frag)). The PTE | |
682 | * flags are considered valid for all PTEs within the fragment range | |
683 | * and corresponding mappings are assumed to be physically contiguous. | |
684 | * | |
685 | * The L1 TLB can store a single PTE for the whole fragment, | |
686 | * significantly increasing the space available for translation | |
687 | * caching. This leads to large improvements in throughput when the | |
688 | * TLB is under pressure. | |
689 | * | |
690 | * The L2 TLB distributes small and large fragments into two | |
691 | * asymmetric partitions. The large fragment cache is significantly | |
692 | * larger. Thus, we try to use large fragments wherever possible. | |
693 | * Userspace can support this by aligning virtual base address and | |
694 | * allocation size to the fragment size. | |
695 | */ | |
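	/* Worked example (illustrative): frag_align = 0x80 bytes of PTEs
	 * = 16 entries = 16 * 4 KiB = 64 KiB of address space, matching
	 * AMDGPU_PTE_FRAG_64KB, i.e. a granularity of 1 << (12 + 4).
	 */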
696 | ||
697 | /* SI and newer are optimized for 64KB */ | |
698 | uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB; | |
699 | uint64_t frag_align = 0x80; | |
700 | ||
701 | uint64_t frag_start = ALIGN(pe_start, frag_align); | |
702 | uint64_t frag_end = pe_end & ~(frag_align - 1); | |
703 | ||
704 | unsigned count; | |
705 | ||
31f6c1fe CK |
706 | /* Abort early if there isn't anything to do */ |
707 | if (pe_start == pe_end) | |
708 | return; | |
709 | ||
d38ceaf9 | 710 | /* system pages are not contiguous */ |
f4833c4f HK |
711 | if (vm_update_params->src || vm_update_params->pages_addr || |
712 | !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { | |
d38ceaf9 AD |
713 | |
714 | count = (pe_end - pe_start) / 8; | |
f4833c4f | 715 | amdgpu_vm_update_pages(adev, vm_update_params, pe_start, |
9ab21462 CK |
716 | addr, count, AMDGPU_GPU_PAGE_SIZE, |
717 | flags); | |
d38ceaf9 AD |
718 | return; |
719 | } | |
720 | ||
721 | /* handle the 4K area at the beginning */ | |
722 | if (pe_start != frag_start) { | |
723 | count = (frag_start - pe_start) / 8; | |
f4833c4f | 724 | amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr, |
9ab21462 | 725 | count, AMDGPU_GPU_PAGE_SIZE, flags); |
d38ceaf9 AD |
726 | addr += AMDGPU_GPU_PAGE_SIZE * count; |
727 | } | |
728 | ||
729 | /* handle the area in the middle */ | |
730 | count = (frag_end - frag_start) / 8; | |
f4833c4f | 731 | amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count, |
9ab21462 | 732 | AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); |
d38ceaf9 AD |
733 | |
734 | /* handle the 4K area at the end */ | |
735 | if (frag_end != pe_end) { | |
736 | addr += AMDGPU_GPU_PAGE_SIZE * count; | |
737 | count = (pe_end - frag_end) / 8; | |
f4833c4f | 738 | amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr, |
9ab21462 | 739 | count, AMDGPU_GPU_PAGE_SIZE, flags); |
d38ceaf9 AD |
740 | } |
741 | } | |
742 | ||
743 | /** | |
744 | * amdgpu_vm_update_ptes - make sure that page tables are valid | |
745 | * | |
746 | * @adev: amdgpu_device pointer | |
f4833c4f | 747 | * @vm_update_params: see amdgpu_vm_update_params definition |
d38ceaf9 AD |
748 | * @vm: requested vm |
749 | * @start: start of GPU address range | |
750 | * @end: end of GPU address range | |
677131a1 | 751 | * @dst: destination address to map to, advanced as the function walks the range |
d38ceaf9 AD |
752 | * @flags: mapping flags |
753 | * | |
8843dbbb | 754 | * Update the page tables in the range @start - @end. |
d38ceaf9 | 755 | */ |
a1e08d3b | 756 | static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, |
f4833c4f HK |
757 | struct amdgpu_vm_update_params |
758 | *vm_update_params, | |
a1e08d3b | 759 | struct amdgpu_vm *vm, |
a1e08d3b CK |
760 | uint64_t start, uint64_t end, |
761 | uint64_t dst, uint32_t flags) | |
d38ceaf9 | 762 | { |
31f6c1fe CK |
763 | const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; |
764 | ||
21718497 | 765 | uint64_t cur_pe_start, cur_pe_end, cur_dst; |
677131a1 | 766 | uint64_t addr; /* next GPU address to be updated */ |
21718497 AX |
767 | uint64_t pt_idx; |
768 | struct amdgpu_bo *pt; | |
769 | unsigned nptes; /* next number of ptes to be updated */ | |
770 | uint64_t next_pe_start; | |
771 | ||
772 | /* initialize the variables */ | |
773 | addr = start; | |
774 | pt_idx = addr >> amdgpu_vm_block_size; | |
775 | pt = vm->page_tables[pt_idx].entry.robj; | |
776 | ||
777 | if ((addr & ~mask) == (end & ~mask)) | |
778 | nptes = end - addr; | |
779 | else | |
780 | nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); | |
781 | ||
782 | cur_pe_start = amdgpu_bo_gpu_offset(pt); | |
783 | cur_pe_start += (addr & mask) * 8; | |
784 | cur_pe_end = cur_pe_start + 8 * nptes; | |
785 | cur_dst = dst; | |
786 | ||
787 | /* for next ptb */ |
788 | addr += nptes; | |
789 | dst += nptes * AMDGPU_GPU_PAGE_SIZE; | |
d38ceaf9 AD |
790 | |
791 | /* walk over the address space and update the page tables */ | |
21718497 AX |
792 | while (addr < end) { |
793 | pt_idx = addr >> amdgpu_vm_block_size; | |
794 | pt = vm->page_tables[pt_idx].entry.robj; | |
d38ceaf9 AD |
795 | |
796 | if ((addr & ~mask) == (end & ~mask)) | |
797 | nptes = end - addr; | |
798 | else | |
799 | nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); | |
800 | ||
677131a1 AX |
801 | next_pe_start = amdgpu_bo_gpu_offset(pt); |
802 | next_pe_start += (addr & mask) * 8; | |
d38ceaf9 | 803 | |
3a6f8e0c AX |
804 | if (cur_pe_end == next_pe_start) { |
805 | /* The next ptb is consecutive to the current one. |
806 | * Don't call amdgpu_vm_frag_ptes yet; the two ptbs |
807 | * will be updated together later. |
808 | */ | |
809 | cur_pe_end += 8 * nptes; | |
810 | } else { | |
f4833c4f | 811 | amdgpu_vm_frag_ptes(adev, vm_update_params, |
677131a1 AX |
812 | cur_pe_start, cur_pe_end, |
813 | cur_dst, flags); | |
d38ceaf9 | 814 | |
677131a1 AX |
815 | cur_pe_start = next_pe_start; |
816 | cur_pe_end = next_pe_start + 8 * nptes; | |
817 | cur_dst = dst; | |
d38ceaf9 AD |
818 | } |
819 | ||
21718497 | 820 | /* for next ptb */ |
d38ceaf9 AD |
821 | addr += nptes; |
822 | dst += nptes * AMDGPU_GPU_PAGE_SIZE; | |
823 | } | |
824 | ||
677131a1 AX |
825 | amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start, |
826 | cur_pe_end, cur_dst, flags); | |
d38ceaf9 AD |
827 | } |
828 | ||
d38ceaf9 AD |
829 | /** |
830 | * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table | |
831 | * | |
832 | * @adev: amdgpu_device pointer | |
fa3ab3c7 CK |
833 | * @src: address where to copy page table entries from |
834 | * @pages_addr: DMA addresses to use for mapping | |
d38ceaf9 | 835 | * @vm: requested vm |
a14faa65 CK |
836 | * @start: start of mapped range |
837 | * @last: last mapped entry | |
838 | * @flags: flags for the entries | |
d38ceaf9 | 839 | * @addr: addr to set the area to |
d38ceaf9 AD |
840 | * @fence: optional resulting fence |
841 | * | |
a14faa65 | 842 | * Fill in the page table entries between @start and @last. |
d38ceaf9 | 843 | * Returns 0 for success, -EINVAL for failure. |
d38ceaf9 AD |
844 | */ |
845 | static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | |
fa3ab3c7 CK |
846 | uint64_t src, |
847 | dma_addr_t *pages_addr, | |
d38ceaf9 | 848 | struct amdgpu_vm *vm, |
a14faa65 CK |
849 | uint64_t start, uint64_t last, |
850 | uint32_t flags, uint64_t addr, | |
851 | struct fence **fence) | |
d38ceaf9 | 852 | { |
2d55e45a | 853 | struct amdgpu_ring *ring; |
a1e08d3b | 854 | void *owner = AMDGPU_FENCE_OWNER_VM; |
d38ceaf9 | 855 | unsigned nptes, ncmds, ndw; |
d71518b5 | 856 | struct amdgpu_job *job; |
f4833c4f | 857 | struct amdgpu_vm_update_params vm_update_params; |
4af9f07c | 858 | struct fence *f = NULL; |
d38ceaf9 AD |
859 | int r; |
860 | ||
2d55e45a | 861 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); |
f4833c4f HK |
862 | memset(&vm_update_params, 0, sizeof(vm_update_params)); |
863 | vm_update_params.src = src; | |
864 | vm_update_params.pages_addr = pages_addr; | |
2d55e45a | 865 | |
a1e08d3b CK |
866 | /* sync to everything on unmapping */ |
867 | if (!(flags & AMDGPU_PTE_VALID)) | |
868 | owner = AMDGPU_FENCE_OWNER_UNDEFINED; | |
869 | ||
a14faa65 | 870 | nptes = last - start + 1; |
d38ceaf9 AD |
871 | |
872 | /* | |
873 | * reserve space for one command every (1 << BLOCK_SIZE) | |
874 | * entries or 2k dwords (whatever is smaller) | |
875 | */ | |
876 | ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1; | |
877 | ||
878 | /* padding, etc. */ | |
879 | ndw = 64; | |
880 | ||
f4833c4f | 881 | if (vm_update_params.src) { |
d38ceaf9 AD |
882 | /* only copy commands needed */ |
883 | ndw += ncmds * 7; | |
884 | ||
f4833c4f | 885 | } else if (vm_update_params.pages_addr) { |
d38ceaf9 AD |
886 | /* header for write data commands */ |
887 | ndw += ncmds * 4; | |
888 | ||
889 | /* body of write data command */ | |
890 | ndw += nptes * 2; | |
891 | ||
892 | } else { | |
893 | /* set page commands needed */ | |
894 | ndw += ncmds * 10; | |
895 | ||
896 | /* two extra commands for begin/end of fragment */ | |
897 | ndw += 2 * 10; | |
898 | } | |
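	/*
	 * Budget example (illustrative numbers only): for nptes = 4096 and
	 * amdgpu_vm_block_size = 9, ncmds = (4096 >> 9) + 1 = 9; the
	 * "set page" path above then reserves
	 * ndw = 64 + 9 * 10 + 2 * 10 = 174 dwords for the IB.
	 */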
899 | ||
d71518b5 CK |
900 | r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); |
901 | if (r) | |
d38ceaf9 | 902 | return r; |
d71518b5 | 903 | |
f4833c4f | 904 | vm_update_params.ib = &job->ibs[0]; |
d5fc5e82 | 905 | |
e86f9cee | 906 | r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, |
a1e08d3b CK |
907 | owner); |
908 | if (r) | |
909 | goto error_free; | |
d38ceaf9 | 910 | |
a1e08d3b CK |
911 | r = reservation_object_reserve_shared(vm->page_directory->tbo.resv); |
912 | if (r) | |
913 | goto error_free; | |
914 | ||
f4833c4f | 915 | amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start, |
fa3ab3c7 | 916 | last + 1, addr, flags); |
d38ceaf9 | 917 | |
f4833c4f HK |
918 | amdgpu_ring_pad_ib(ring, vm_update_params.ib); |
919 | WARN_ON(vm_update_params.ib->length_dw > ndw); | |
2bd9ccfa CK |
920 | r = amdgpu_job_submit(job, ring, &vm->entity, |
921 | AMDGPU_FENCE_OWNER_VM, &f); | |
4af9f07c CZ |
922 | if (r) |
923 | goto error_free; | |
d38ceaf9 | 924 | |
bf60efd3 | 925 | amdgpu_bo_fence(vm->page_directory, f, true); |
4af9f07c CZ |
926 | if (fence) { |
927 | fence_put(*fence); | |
928 | *fence = fence_get(f); | |
929 | } | |
281b4223 | 930 | fence_put(f); |
d38ceaf9 | 931 | return 0; |
d5fc5e82 CZ |
932 | |
933 | error_free: | |
d71518b5 | 934 | amdgpu_job_free(job); |
4af9f07c | 935 | return r; |
d38ceaf9 AD |
936 | } |
937 | ||
a14faa65 CK |
938 | /** |
939 | * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks | |
940 | * | |
941 | * @adev: amdgpu_device pointer | |
8358dcee CK |
942 | * @gtt_flags: flags as they are used for GTT |
943 | * @pages_addr: DMA addresses to use for mapping | |
a14faa65 CK |
944 | * @vm: requested vm |
945 | * @mapping: mapped range and flags to use for the update | |
946 | * @addr: addr to set the area to | |
8358dcee | 947 | * @flags: HW flags for the mapping |
a14faa65 CK |
948 | * @fence: optional resulting fence |
949 | * | |
950 | * Split the mapping into smaller chunks so that each update fits | |
951 | * into a SDMA IB. | |
952 | * Returns 0 for success, -EINVAL for failure. | |
953 | */ | |
954 | static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, | |
a14faa65 | 955 | uint32_t gtt_flags, |
8358dcee | 956 | dma_addr_t *pages_addr, |
a14faa65 CK |
957 | struct amdgpu_vm *vm, |
958 | struct amdgpu_bo_va_mapping *mapping, | |
fa3ab3c7 CK |
959 | uint32_t flags, uint64_t addr, |
960 | struct fence **fence) | |
a14faa65 CK |
961 | { |
962 | const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE; | |
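	/* 64 MiB / 4 KiB GPU pages = 16384 PTE updates per chunk (illustrative
	 * note; this keeps each update well inside a single SDMA IB). */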
963 | ||
fa3ab3c7 | 964 | uint64_t src = 0, start = mapping->it.start; |
a14faa65 CK |
965 | int r; |
966 | ||
967 | /* normally, bo_va->flags only contains the READABLE and WRITEABLE bits, |
968 | * but just in case we filter the flags here first |
969 | */ | |
970 | if (!(mapping->flags & AMDGPU_PTE_READABLE)) | |
971 | flags &= ~AMDGPU_PTE_READABLE; | |
972 | if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) | |
973 | flags &= ~AMDGPU_PTE_WRITEABLE; | |
974 | ||
975 | trace_amdgpu_vm_bo_update(mapping); | |
976 | ||
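	/* If the BO is GTT backed and its PTE flags match the global GART
	 * flags, the entries can be copied straight out of the GART table
	 * (src path below) instead of being rewritten from pages_addr. */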
8358dcee | 977 | if (pages_addr) { |
fa3ab3c7 CK |
978 | if (flags == gtt_flags) |
979 | src = adev->gart.table_addr + (addr >> 12) * 8; | |
fa3ab3c7 CK |
980 | addr = 0; |
981 | } | |
a14faa65 CK |
982 | addr += mapping->offset; |
983 | ||
8358dcee | 984 | if (!pages_addr || src) |
fa3ab3c7 | 985 | return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm, |
a14faa65 CK |
986 | start, mapping->it.last, |
987 | flags, addr, fence); | |
988 | ||
989 | while (start != mapping->it.last + 1) { | |
990 | uint64_t last; | |
991 | ||
fb29b57c | 992 | last = min((uint64_t)mapping->it.last, start + max_size - 1); |
fa3ab3c7 | 993 | r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm, |
a14faa65 CK |
994 | start, last, flags, addr, |
995 | fence); | |
996 | if (r) | |
997 | return r; | |
998 | ||
999 | start = last + 1; | |
fb29b57c | 1000 | addr += max_size * AMDGPU_GPU_PAGE_SIZE; |
a14faa65 CK |
1001 | } |
1002 | ||
1003 | return 0; | |
1004 | } | |
1005 | ||
d38ceaf9 AD |
1006 | /** |
1007 | * amdgpu_vm_bo_update - update all BO mappings in the vm page table | |
1008 | * | |
1009 | * @adev: amdgpu_device pointer | |
1010 | * @bo_va: requested BO and VM object | |
1011 | * @mem: ttm mem | |
1012 | * | |
1013 | * Fill in the page table entries for @bo_va. | |
1014 | * Returns 0 for success, -EINVAL for failure. | |
1015 | * | |
1016 | * Object has to be reserved and mutex must be locked! |
1017 | */ | |
1018 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | |
1019 | struct amdgpu_bo_va *bo_va, | |
1020 | struct ttm_mem_reg *mem) | |
1021 | { | |
1022 | struct amdgpu_vm *vm = bo_va->vm; | |
1023 | struct amdgpu_bo_va_mapping *mapping; | |
8358dcee | 1024 | dma_addr_t *pages_addr = NULL; |
fa3ab3c7 | 1025 | uint32_t gtt_flags, flags; |
d38ceaf9 AD |
1026 | uint64_t addr; |
1027 | int r; | |
1028 | ||
1029 | if (mem) { | |
8358dcee CK |
1030 | struct ttm_dma_tt *ttm; |
1031 | ||
b7d698d7 | 1032 | addr = (u64)mem->start << PAGE_SHIFT; |
9ab21462 CK |
1033 | switch (mem->mem_type) { |
1034 | case TTM_PL_TT: | |
8358dcee CK |
1035 | ttm = container_of(bo_va->bo->tbo.ttm, struct |
1036 | ttm_dma_tt, ttm); | |
1037 | pages_addr = ttm->dma_address; | |
9ab21462 CK |
1038 | break; |
1039 | ||
1040 | case TTM_PL_VRAM: | |
d38ceaf9 | 1041 | addr += adev->vm_manager.vram_base_offset; |
9ab21462 CK |
1042 | break; |
1043 | ||
1044 | default: | |
1045 | break; | |
1046 | } | |
d38ceaf9 AD |
1047 | } else { |
1048 | addr = 0; | |
1049 | } | |
1050 | ||
d38ceaf9 | 1051 | flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); |
fa3ab3c7 | 1052 | gtt_flags = (adev == bo_va->bo->adev) ? flags : 0; |
d38ceaf9 | 1053 | |
7fc11959 CK |
1054 | spin_lock(&vm->status_lock); |
1055 | if (!list_empty(&bo_va->vm_status)) | |
1056 | list_splice_init(&bo_va->valids, &bo_va->invalids); | |
1057 | spin_unlock(&vm->status_lock); | |
1058 | ||
1059 | list_for_each_entry(mapping, &bo_va->invalids, list) { | |
8358dcee CK |
1060 | r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm, |
1061 | mapping, flags, addr, | |
1062 | &bo_va->last_pt_update); | |
d38ceaf9 AD |
1063 | if (r) |
1064 | return r; | |
1065 | } | |
1066 | ||
d6c10f6b CK |
1067 | if (trace_amdgpu_vm_bo_mapping_enabled()) { |
1068 | list_for_each_entry(mapping, &bo_va->valids, list) | |
1069 | trace_amdgpu_vm_bo_mapping(mapping); | |
1070 | ||
1071 | list_for_each_entry(mapping, &bo_va->invalids, list) | |
1072 | trace_amdgpu_vm_bo_mapping(mapping); | |
1073 | } | |
1074 | ||
d38ceaf9 | 1075 | spin_lock(&vm->status_lock); |
6d1d0ef7 | 1076 | list_splice_init(&bo_va->invalids, &bo_va->valids); |
d38ceaf9 | 1077 | list_del_init(&bo_va->vm_status); |
7fc11959 CK |
1078 | if (!mem) |
1079 | list_add(&bo_va->vm_status, &vm->cleared); | |
d38ceaf9 AD |
1080 | spin_unlock(&vm->status_lock); |
1081 | ||
1082 | return 0; | |
1083 | } | |
1084 | ||
1085 | /** | |
1086 | * amdgpu_vm_clear_freed - clear freed BOs in the PT | |
1087 | * | |
1088 | * @adev: amdgpu_device pointer | |
1089 | * @vm: requested vm | |
1090 | * | |
1091 | * Make sure all freed BOs are cleared in the PT. | |
1092 | * Returns 0 for success. | |
1093 | * | |
1094 | * PTs have to be reserved and mutex must be locked! | |
1095 | */ | |
1096 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | |
1097 | struct amdgpu_vm *vm) | |
1098 | { | |
1099 | struct amdgpu_bo_va_mapping *mapping; | |
1100 | int r; | |
1101 | ||
1102 | while (!list_empty(&vm->freed)) { | |
1103 | mapping = list_first_entry(&vm->freed, | |
1104 | struct amdgpu_bo_va_mapping, list); | |
1105 | list_del(&mapping->list); | |
e17841b9 | 1106 | |
8358dcee | 1107 | r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping, |
fa3ab3c7 | 1108 | 0, 0, NULL); |
d38ceaf9 AD |
1109 | kfree(mapping); |
1110 | if (r) | |
1111 | return r; | |
1112 | ||
1113 | } | |
1114 | return 0; | |
1115 | ||
1116 | } | |
1117 | ||
1118 | /** | |
1119 | * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT | |
1120 | * | |
1121 | * @adev: amdgpu_device pointer | |
1122 | * @vm: requested vm | |
1123 | * | |
1124 | * Make sure all invalidated BOs are cleared in the PT. | |
1125 | * Returns 0 for success. | |
1126 | * | |
1127 | * PTs have to be reserved and mutex must be locked! | |
1128 | */ | |
1129 | int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, | |
cfe2c978 | 1130 | struct amdgpu_vm *vm, struct amdgpu_sync *sync) |
d38ceaf9 | 1131 | { |
cfe2c978 | 1132 | struct amdgpu_bo_va *bo_va = NULL; |
91e1a520 | 1133 | int r = 0; |
d38ceaf9 AD |
1134 | |
1135 | spin_lock(&vm->status_lock); | |
1136 | while (!list_empty(&vm->invalidated)) { | |
1137 | bo_va = list_first_entry(&vm->invalidated, | |
1138 | struct amdgpu_bo_va, vm_status); | |
1139 | spin_unlock(&vm->status_lock); | |
32b41ac2 | 1140 | |
d38ceaf9 AD |
1141 | r = amdgpu_vm_bo_update(adev, bo_va, NULL); |
1142 | if (r) | |
1143 | return r; | |
1144 | ||
1145 | spin_lock(&vm->status_lock); | |
1146 | } | |
1147 | spin_unlock(&vm->status_lock); | |
1148 | ||
cfe2c978 | 1149 | if (bo_va) |
bb1e38a4 | 1150 | r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update); |
91e1a520 CK |
1151 | |
1152 | return r; | |
d38ceaf9 AD |
1153 | } |
1154 | ||
1155 | /** | |
1156 | * amdgpu_vm_bo_add - add a bo to a specific vm | |
1157 | * | |
1158 | * @adev: amdgpu_device pointer | |
1159 | * @vm: requested vm | |
1160 | * @bo: amdgpu buffer object | |
1161 | * | |
8843dbbb | 1162 | * Add @bo into the requested vm. |
d38ceaf9 AD |
1163 | * Add @bo to the list of bos associated with the vm |
1164 | * Returns newly added bo_va or NULL for failure | |
1165 | * | |
1166 | * Object has to be reserved! | |
1167 | */ | |
1168 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | |
1169 | struct amdgpu_vm *vm, | |
1170 | struct amdgpu_bo *bo) | |
1171 | { | |
1172 | struct amdgpu_bo_va *bo_va; | |
1173 | ||
1174 | bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL); | |
1175 | if (bo_va == NULL) { | |
1176 | return NULL; | |
1177 | } | |
1178 | bo_va->vm = vm; | |
1179 | bo_va->bo = bo; | |
d38ceaf9 AD |
1180 | bo_va->ref_count = 1; |
1181 | INIT_LIST_HEAD(&bo_va->bo_list); | |
7fc11959 CK |
1182 | INIT_LIST_HEAD(&bo_va->valids); |
1183 | INIT_LIST_HEAD(&bo_va->invalids); | |
d38ceaf9 | 1184 | INIT_LIST_HEAD(&bo_va->vm_status); |
32b41ac2 | 1185 | |
d38ceaf9 | 1186 | list_add_tail(&bo_va->bo_list, &bo->va); |
d38ceaf9 AD |
1187 | |
1188 | return bo_va; | |
1189 | } | |
1190 | ||
1191 | /** | |
1192 | * amdgpu_vm_bo_map - map bo inside a vm | |
1193 | * | |
1194 | * @adev: amdgpu_device pointer | |
1195 | * @bo_va: bo_va to store the address | |
1196 | * @saddr: where to map the BO | |
1197 | * @offset: requested offset in the BO | |
1198 | * @flags: attributes of pages (read/write/valid/etc.) | |
1199 | * | |
1200 | * Add a mapping of the BO at the specified addr into the VM. |
1201 | * Returns 0 for success, error for failure. | |
1202 | * | |
49b02b18 | 1203 | * Object has to be reserved and unreserved outside! |
d38ceaf9 AD |
1204 | */ |
1205 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |
1206 | struct amdgpu_bo_va *bo_va, | |
1207 | uint64_t saddr, uint64_t offset, | |
1208 | uint64_t size, uint32_t flags) | |
1209 | { | |
1210 | struct amdgpu_bo_va_mapping *mapping; | |
1211 | struct amdgpu_vm *vm = bo_va->vm; | |
1212 | struct interval_tree_node *it; | |
1213 | unsigned last_pfn, pt_idx; | |
1214 | uint64_t eaddr; | |
1215 | int r; | |
1216 | ||
0be52de9 CK |
1217 | /* validate the parameters */ |
1218 | if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || | |
49b02b18 | 1219 | size == 0 || size & AMDGPU_GPU_PAGE_MASK) |
0be52de9 | 1220 | return -EINVAL; |
0be52de9 | 1221 | |
d38ceaf9 | 1222 | /* make sure object fit at this offset */ |
005ae95e | 1223 | eaddr = saddr + size - 1; |
49b02b18 | 1224 | if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) |
d38ceaf9 | 1225 | return -EINVAL; |
d38ceaf9 AD |
1226 | |
1227 | last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; | |
005ae95e FK |
1228 | if (last_pfn >= adev->vm_manager.max_pfn) { |
1229 | dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n", | |
d38ceaf9 | 1230 | last_pfn, adev->vm_manager.max_pfn); |
d38ceaf9 AD |
1231 | return -EINVAL; |
1232 | } | |
1233 | ||
d38ceaf9 AD |
1234 | saddr /= AMDGPU_GPU_PAGE_SIZE; |
1235 | eaddr /= AMDGPU_GPU_PAGE_SIZE; | |
1236 | ||
005ae95e | 1237 | it = interval_tree_iter_first(&vm->va, saddr, eaddr); |
d38ceaf9 AD |
1238 | if (it) { |
1239 | struct amdgpu_bo_va_mapping *tmp; | |
1240 | tmp = container_of(it, struct amdgpu_bo_va_mapping, it); | |
1241 | /* bo and tmp overlap, invalid addr */ | |
1242 | dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " | |
1243 | "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, | |
1244 | tmp->it.start, tmp->it.last + 1); | |
d38ceaf9 | 1245 | r = -EINVAL; |
f48b2659 | 1246 | goto error; |
d38ceaf9 AD |
1247 | } |
1248 | ||
1249 | mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); | |
1250 | if (!mapping) { | |
d38ceaf9 | 1251 | r = -ENOMEM; |
f48b2659 | 1252 | goto error; |
d38ceaf9 AD |
1253 | } |
1254 | ||
1255 | INIT_LIST_HEAD(&mapping->list); | |
1256 | mapping->it.start = saddr; | |
005ae95e | 1257 | mapping->it.last = eaddr; |
d38ceaf9 AD |
1258 | mapping->offset = offset; |
1259 | mapping->flags = flags; | |
1260 | ||
7fc11959 | 1261 | list_add(&mapping->list, &bo_va->invalids); |
d38ceaf9 AD |
1262 | interval_tree_insert(&mapping->it, &vm->va); |
1263 | ||
1264 | /* Make sure the page tables are allocated */ | |
1265 | saddr >>= amdgpu_vm_block_size; | |
1266 | eaddr >>= amdgpu_vm_block_size; | |
1267 | ||
1268 | BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev)); | |
1269 | ||
1270 | if (eaddr > vm->max_pde_used) | |
1271 | vm->max_pde_used = eaddr; | |
1272 | ||
d38ceaf9 AD |
1273 | /* walk over the address space and allocate the page tables */ |
1274 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { | |
bf60efd3 | 1275 | struct reservation_object *resv = vm->page_directory->tbo.resv; |
ee1782c3 | 1276 | struct amdgpu_bo_list_entry *entry; |
d38ceaf9 AD |
1277 | struct amdgpu_bo *pt; |
1278 | ||
ee1782c3 CK |
1279 | entry = &vm->page_tables[pt_idx].entry; |
1280 | if (entry->robj) | |
d38ceaf9 AD |
1281 | continue; |
1282 | ||
d38ceaf9 AD |
1283 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, |
1284 | AMDGPU_GPU_PAGE_SIZE, true, | |
857d913d AD |
1285 | AMDGPU_GEM_DOMAIN_VRAM, |
1286 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS, | |
bf60efd3 | 1287 | NULL, resv, &pt); |
49b02b18 | 1288 | if (r) |
d38ceaf9 | 1289 | goto error_free; |
49b02b18 | 1290 | |
82b9c55b CK |
1291 | /* Keep a reference to the page table to avoid freeing |
1292 | * them up in the wrong order. | |
1293 | */ | |
1294 | pt->parent = amdgpu_bo_ref(vm->page_directory); | |
1295 | ||
2bd9ccfa | 1296 | r = amdgpu_vm_clear_bo(adev, vm, pt); |
d38ceaf9 AD |
1297 | if (r) { |
1298 | amdgpu_bo_unref(&pt); | |
1299 | goto error_free; | |
1300 | } | |
1301 | ||
ee1782c3 | 1302 | entry->robj = pt; |
ee1782c3 CK |
1303 | entry->priority = 0; |
1304 | entry->tv.bo = &entry->robj->tbo; | |
1305 | entry->tv.shared = true; | |
2f568dbd | 1306 | entry->user_pages = NULL; |
d38ceaf9 | 1307 | vm->page_tables[pt_idx].addr = 0; |
d38ceaf9 AD |
1308 | } |
1309 | ||
d38ceaf9 AD |
1310 | return 0; |
1311 | ||
1312 | error_free: | |
d38ceaf9 AD |
1313 | list_del(&mapping->list); |
1314 | interval_tree_remove(&mapping->it, &vm->va); | |
93e3e438 | 1315 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
d38ceaf9 AD |
1316 | kfree(mapping); |
1317 | ||
f48b2659 | 1318 | error: |
d38ceaf9 AD |
1319 | return r; |
1320 | } | |
1321 | ||
1322 | /** | |
1323 | * amdgpu_vm_bo_unmap - remove bo mapping from vm | |
1324 | * | |
1325 | * @adev: amdgpu_device pointer | |
1326 | * @bo_va: bo_va to remove the address from | |
1327 | * @saddr: where the BO is mapped |
1328 | * | |
1329 | * Remove a mapping of the BO at the specified addr from the VM. |
1330 | * Returns 0 for success, error for failure. | |
1331 | * | |
49b02b18 | 1332 | * Object has to be reserved and unreserved outside! |
d38ceaf9 AD |
1333 | */ |
1334 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | |
1335 | struct amdgpu_bo_va *bo_va, | |
1336 | uint64_t saddr) | |
1337 | { | |
1338 | struct amdgpu_bo_va_mapping *mapping; | |
1339 | struct amdgpu_vm *vm = bo_va->vm; | |
7fc11959 | 1340 | bool valid = true; |
d38ceaf9 | 1341 | |
6c7fc503 | 1342 | saddr /= AMDGPU_GPU_PAGE_SIZE; |
32b41ac2 | 1343 | |
7fc11959 | 1344 | list_for_each_entry(mapping, &bo_va->valids, list) { |
d38ceaf9 AD |
1345 | if (mapping->it.start == saddr) |
1346 | break; | |
1347 | } | |
1348 | ||
7fc11959 CK |
1349 | if (&mapping->list == &bo_va->valids) { |
1350 | valid = false; | |
1351 | ||
1352 | list_for_each_entry(mapping, &bo_va->invalids, list) { | |
1353 | if (mapping->it.start == saddr) | |
1354 | break; | |
1355 | } | |
1356 | ||
32b41ac2 | 1357 | if (&mapping->list == &bo_va->invalids) |
7fc11959 | 1358 | return -ENOENT; |
d38ceaf9 | 1359 | } |
32b41ac2 | 1360 | |
d38ceaf9 AD |
1361 | list_del(&mapping->list); |
1362 | interval_tree_remove(&mapping->it, &vm->va); | |
93e3e438 | 1363 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
d38ceaf9 | 1364 | |
e17841b9 | 1365 | if (valid) |
d38ceaf9 | 1366 | list_add(&mapping->list, &vm->freed); |
e17841b9 | 1367 | else |
d38ceaf9 | 1368 | kfree(mapping); |
d38ceaf9 AD |
1369 | |
1370 | return 0; | |
1371 | } | |
1372 | ||
1373 | /** | |
1374 | * amdgpu_vm_bo_rmv - remove a bo from a specific vm |
1375 | * | |
1376 | * @adev: amdgpu_device pointer | |
1377 | * @bo_va: requested bo_va | |
1378 | * | |
8843dbbb | 1379 | * Remove @bo_va->bo from the requested vm. |
d38ceaf9 AD |
1380 | * |
1381 | * Object has to be reserved! |
1382 | */ | |
1383 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | |
1384 | struct amdgpu_bo_va *bo_va) | |
1385 | { | |
1386 | struct amdgpu_bo_va_mapping *mapping, *next; | |
1387 | struct amdgpu_vm *vm = bo_va->vm; | |
1388 | ||
1389 | list_del(&bo_va->bo_list); | |
1390 | ||
d38ceaf9 AD |
1391 | spin_lock(&vm->status_lock); |
1392 | list_del(&bo_va->vm_status); | |
1393 | spin_unlock(&vm->status_lock); | |
1394 | ||
7fc11959 | 1395 | list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { |
d38ceaf9 AD |
1396 | list_del(&mapping->list); |
1397 | interval_tree_remove(&mapping->it, &vm->va); | |
93e3e438 | 1398 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
7fc11959 CK |
1399 | list_add(&mapping->list, &vm->freed); |
1400 | } | |
1401 | list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { | |
1402 | list_del(&mapping->list); | |
1403 | interval_tree_remove(&mapping->it, &vm->va); | |
1404 | kfree(mapping); | |
d38ceaf9 | 1405 | } |
32b41ac2 | 1406 | |
bb1e38a4 | 1407 | fence_put(bo_va->last_pt_update); |
d38ceaf9 | 1408 | kfree(bo_va); |
d38ceaf9 AD |
1409 | } |
1410 | ||
1411 | /** | |
1412 | * amdgpu_vm_bo_invalidate - mark the bo as invalid | |
1413 | * | |
1414 | * @adev: amdgpu_device pointer | |
1415 | * @vm: requested vm | |
1416 | * @bo: amdgpu buffer object | |
1417 | * | |
8843dbbb | 1418 | * Mark @bo as invalid. |
d38ceaf9 AD |
1419 | */ |
1420 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | |
1421 | struct amdgpu_bo *bo) | |
1422 | { | |
1423 | struct amdgpu_bo_va *bo_va; | |
1424 | ||
1425 | list_for_each_entry(bo_va, &bo->va, bo_list) { | |
7fc11959 CK |
1426 | spin_lock(&bo_va->vm->status_lock); |
1427 | if (list_empty(&bo_va->vm_status)) | |
d38ceaf9 | 1428 | list_add(&bo_va->vm_status, &bo_va->vm->invalidated); |
7fc11959 | 1429 | spin_unlock(&bo_va->vm->status_lock); |
d38ceaf9 AD |
1430 | } |
1431 | } | |
1432 | ||
1433 | /** | |
1434 | * amdgpu_vm_init - initialize a vm instance | |
1435 | * | |
1436 | * @adev: amdgpu_device pointer | |
1437 | * @vm: requested vm | |
1438 | * | |
8843dbbb | 1439 | * Init @vm fields. |
d38ceaf9 AD |
1440 | */ |
1441 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |
1442 | { | |
1443 | const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, | |
1444 | AMDGPU_VM_PTE_COUNT * 8); | |
9571e1d8 | 1445 | unsigned pd_size, pd_entries; |
2d55e45a CK |
1446 | unsigned ring_instance; |
1447 | struct amdgpu_ring *ring; | |
2bd9ccfa | 1448 | struct amd_sched_rq *rq; |
d38ceaf9 AD |
1449 | int i, r; |
1450 | ||
bcb1ba35 CK |
1451 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) |
1452 | vm->ids[i] = NULL; | |
d38ceaf9 | 1453 | vm->va = RB_ROOT; |
031e2983 | 1454 | vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); |
d38ceaf9 AD |
1455 | spin_lock_init(&vm->status_lock); |
1456 | INIT_LIST_HEAD(&vm->invalidated); | |
7fc11959 | 1457 | INIT_LIST_HEAD(&vm->cleared); |
d38ceaf9 | 1458 | INIT_LIST_HEAD(&vm->freed); |
20250215 | 1459 | |
d38ceaf9 AD |
1460 | pd_size = amdgpu_vm_directory_size(adev); |
1461 | pd_entries = amdgpu_vm_num_pdes(adev); | |
1462 | ||
1463 | /* allocate page table array */ | |
9571e1d8 | 1464 | vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt)); |
d38ceaf9 AD |
1465 | if (vm->page_tables == NULL) { |
1466 | DRM_ERROR("Cannot allocate memory for page table array\n"); | |
1467 | return -ENOMEM; | |
1468 | } | |
1469 | ||
2bd9ccfa | 1470 | /* create scheduler entity for page table updates */ |
2d55e45a CK |
1471 | |
1472 | ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring); | |
1473 | ring_instance %= adev->vm_manager.vm_pte_num_rings; | |
1474 | ring = adev->vm_manager.vm_pte_rings[ring_instance]; | |
2bd9ccfa CK |
1475 | rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; |
1476 | r = amd_sched_entity_init(&ring->sched, &vm->entity, | |
1477 | rq, amdgpu_sched_jobs); | |
1478 | if (r) | |
1479 | return r; | |
1480 | ||
05906dec BN |
1481 | vm->page_directory_fence = NULL; |
1482 | ||
d38ceaf9 | 1483 | r = amdgpu_bo_create(adev, pd_size, align, true, |
857d913d AD |
1484 | AMDGPU_GEM_DOMAIN_VRAM, |
1485 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS, | |
72d7668b | 1486 | NULL, NULL, &vm->page_directory); |
d38ceaf9 | 1487 | if (r) |
2bd9ccfa CK |
1488 | goto error_free_sched_entity; |
1489 | ||
ef9f0a83 | 1490 | r = amdgpu_bo_reserve(vm->page_directory, false); |
2bd9ccfa CK |
1491 | if (r) |
1492 | goto error_free_page_directory; | |
1493 | ||
1494 | r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory); | |
ef9f0a83 | 1495 | amdgpu_bo_unreserve(vm->page_directory); |
2bd9ccfa CK |
1496 | if (r) |
1497 | goto error_free_page_directory; | |
d38ceaf9 AD |
1498 | |
1499 | return 0; | |
2bd9ccfa CK |
1500 | |
1501 | error_free_page_directory: | |
1502 | amdgpu_bo_unref(&vm->page_directory); | |
1503 | vm->page_directory = NULL; | |
1504 | ||
1505 | error_free_sched_entity: | |
1506 | amd_sched_entity_fini(&ring->sched, &vm->entity); | |
1507 | ||
1508 | return r; | |
d38ceaf9 AD |
1509 | } |
1510 | ||
1511 | /** | |
1512 | * amdgpu_vm_fini - tear down a vm instance | |
1513 | * | |
1514 | * @adev: amdgpu_device pointer | |
1515 | * @vm: requested vm | |
1516 | * | |
8843dbbb | 1517 | * Tear down @vm. |
d38ceaf9 AD |
1518 | * Unbind the VM and remove all bos from the vm bo list |
1519 | */ | |
1520 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |
1521 | { | |
1522 | struct amdgpu_bo_va_mapping *mapping, *tmp; | |
1523 | int i; | |
1524 | ||
2d55e45a | 1525 | amd_sched_entity_fini(vm->entity.sched, &vm->entity); |
2bd9ccfa | 1526 | |
d38ceaf9 AD |
1527 | if (!RB_EMPTY_ROOT(&vm->va)) { |
1528 | dev_err(adev->dev, "still active bo inside vm\n"); | |
1529 | } | |
1530 | rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) { | |
1531 | list_del(&mapping->list); | |
1532 | interval_tree_remove(&mapping->it, &vm->va); | |
1533 | kfree(mapping); | |
1534 | } | |
1535 | list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { | |
1536 | list_del(&mapping->list); | |
1537 | kfree(mapping); | |
1538 | } | |
1539 | ||
1540 | for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) | |
ee1782c3 | 1541 | amdgpu_bo_unref(&vm->page_tables[i].entry.robj); |
9571e1d8 | 1542 | drm_free_large(vm->page_tables); |
d38ceaf9 AD |
1543 | |
1544 | amdgpu_bo_unref(&vm->page_directory); | |
05906dec | 1545 | fence_put(vm->page_directory_fence); |
d38ceaf9 | 1546 | } |
ea89f8c9 | 1547 | |
a9a78b32 CK |
1548 | /** |
1549 | * amdgpu_vm_manager_init - init the VM manager | |
1550 | * | |
1551 | * @adev: amdgpu_device pointer | |
1552 | * | |
1553 | * Initialize the VM manager structures | |
1554 | */ | |
1555 | void amdgpu_vm_manager_init(struct amdgpu_device *adev) | |
1556 | { | |
1557 | unsigned i; | |
1558 | ||
1559 | INIT_LIST_HEAD(&adev->vm_manager.ids_lru); | |
1560 | ||
1561 | /* skip over VMID 0, since it is the system VM */ | |
971fe9a9 CK |
1562 | for (i = 1; i < adev->vm_manager.num_ids; ++i) { |
1563 | amdgpu_vm_reset_id(adev, i); | |
832a902f | 1564 | amdgpu_sync_create(&adev->vm_manager.ids[i].active); |
a9a78b32 CK |
1565 | list_add_tail(&adev->vm_manager.ids[i].list, |
1566 | &adev->vm_manager.ids_lru); | |
971fe9a9 | 1567 | } |
2d55e45a | 1568 | |
1fbb2e92 CK |
1569 | adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); |
1570 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) | |
1571 | adev->vm_manager.seqno[i] = 0; | |
1572 | ||
2d55e45a | 1573 | atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); |
b1c8a81f | 1574 | atomic64_set(&adev->vm_manager.client_counter, 0); |
a9a78b32 CK |
1575 | } |
1576 | ||
ea89f8c9 CK |
1577 | /** |
1578 | * amdgpu_vm_manager_fini - cleanup VM manager | |
1579 | * | |
1580 | * @adev: amdgpu_device pointer | |
1581 | * | |
1582 | * Cleanup the VM manager and free resources. | |
1583 | */ | |
1584 | void amdgpu_vm_manager_fini(struct amdgpu_device *adev) | |
1585 | { | |
1586 | unsigned i; | |
1587 | ||
bcb1ba35 CK |
1588 | for (i = 0; i < AMDGPU_NUM_VM; ++i) { |
1589 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[i]; | |
1590 | ||
832a902f CK |
1591 | fence_put(adev->vm_manager.ids[i].first); |
1592 | amdgpu_sync_free(&adev->vm_manager.ids[i].active); | |
bcb1ba35 CK |
1593 | fence_put(id->flushed_updates); |
1594 | } | |
ea89f8c9 | 1595 | } |