/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
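
/*
 * Worked example of the addressing scheme (editor's sketch; the block
 * size value is an assumption, i.e. amdgpu_vm_block_size = 9 so that
 * AMDGPU_VM_PTE_COUNT = 512 entries per page table). For a
 * page-granular address "addr" as used by amdgpu_vm_update_ptes()
 * below:
 *
 *	pt_idx  = addr >> amdgpu_vm_block_size;      // which page table
 *	pte_off = addr & (AMDGPU_VM_PTE_COUNT - 1);  // entry within it
 *
 * so GPU page 0x12345 lands in page table 0x91 (0x12345 >> 9) at
 * entry 0x145 (0x12345 & 0x1ff).
 */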
52 | ||
53 | /** | |
54 | * amdgpu_vm_num_pde - return the number of page directory entries | |
55 | * | |
56 | * @adev: amdgpu_device pointer | |
57 | * | |
58 | * Calculate the number of page directory entries (cayman+). | |
59 | */ | |
60 | static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev) | |
61 | { | |
62 | return adev->vm_manager.max_pfn >> amdgpu_vm_block_size; | |
63 | } | |
64 | ||
65 | /** | |
66 | * amdgpu_vm_directory_size - returns the size of the page directory in bytes | |
67 | * | |
68 | * @adev: amdgpu_device pointer | |
69 | * | |
70 | * Calculate the size of the page directory in bytes (cayman+). | |
71 | */ | |
72 | static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) | |
73 | { | |
74 | return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8); | |
75 | } | |
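
/*
 * Worked numbers (editor's sketch, parameter values are assumptions):
 * with max_pfn = 1 << 20 (4GB of address space in 4KB GPU pages) and
 * amdgpu_vm_block_size = 9, amdgpu_vm_num_pdes() returns
 * (1 << 20) >> 9 = 2048 directory entries, and
 * amdgpu_vm_directory_size() returns 2048 * 8 = 16KB (one 64-bit PDE
 * per page table), page aligned.
 */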
76 | ||
77 | /** | |
78 | * amdgpu_vm_get_bos - add the vm BOs to a validation list | |
79 | * | |
80 | * @vm: vm providing the BOs | |
81 | * @head: head of validation list | |
82 | * | |
83 | * Add the page directory to the list of BOs to | |
84 | * validate for command submission (cayman+). | |
85 | */ | |
86 | struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, | |
87 | struct amdgpu_vm *vm, | |
88 | struct list_head *head) | |
89 | { | |
90 | struct amdgpu_bo_list_entry *list; | |
91 | unsigned i, idx; | |
92 | ||
93 | mutex_lock(&vm->mutex); | |
94 | list = drm_malloc_ab(vm->max_pde_used + 2, | |
95 | sizeof(struct amdgpu_bo_list_entry)); | |
96 | if (!list) { | |
97 | mutex_unlock(&vm->mutex); | |
98 | return NULL; | |
99 | } | |
100 | ||
101 | /* add the vm page table to the list */ | |
102 | list[0].robj = vm->page_directory; | |
103 | list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; | |
104 | list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; | |
105 | list[0].priority = 0; | |
106 | list[0].tv.bo = &vm->page_directory->tbo; | |
107 | list[0].tv.shared = true; | |
108 | list_add(&list[0].tv.head, head); | |
109 | ||
110 | for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { | |
111 | if (!vm->page_tables[i].bo) | |
112 | continue; | |
113 | ||
114 | list[idx].robj = vm->page_tables[i].bo; | |
115 | list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; | |
116 | list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM; | |
117 | list[idx].priority = 0; | |
118 | list[idx].tv.bo = &list[idx].robj->tbo; | |
119 | list[idx].tv.shared = true; | |
120 | list_add(&list[idx++].tv.head, head); | |
121 | } | |
122 | mutex_unlock(&vm->mutex); | |
123 | ||
124 | return list; | |
125 | } | |
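
/*
 * Usage sketch (editor's addition, modelled on the command submission
 * path; the surrounding code is an assumption, only
 * amdgpu_vm_get_bos() itself is from this file):
 *
 *	struct amdgpu_bo_list_entry *vm_bos;
 *	struct list_head head;
 *
 *	INIT_LIST_HEAD(&head);
 *	vm_bos = amdgpu_vm_get_bos(adev, vm, &head);
 *	if (!vm_bos)
 *		return -ENOMEM;
 *	// reserve and validate everything on @head, submit, then:
 *	drm_free_large(vm_bos);
 *
 * drm_free_large() is the usual counterpart of drm_malloc_ab().
 */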
126 | ||
127 | /** | |
128 | * amdgpu_vm_grab_id - allocate the next free VMID | |
129 | * | |
130 | * @ring: ring we want to submit job to | |
131 | * @vm: vm to allocate id for | |
132 | * | |
133 | * Allocate an id for the vm (cayman+). | |
134 | * Returns the fence we need to sync to (if any). | |
135 | * | |
136 | * Global and local mutex must be locked! | |
137 | */ | |
138 | struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring, | |
139 | struct amdgpu_vm *vm) | |
140 | { | |
141 | struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {}; | |
142 | struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; | |
143 | struct amdgpu_device *adev = ring->adev; | |
144 | ||
145 | unsigned choices[2] = {}; | |
146 | unsigned i; | |
147 | ||
148 | /* check if the id is still valid */ | |
149 | if (vm_id->id && vm_id->last_id_use && | |
150 | vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) | |
151 | return NULL; | |
152 | ||
153 | /* we definately need to flush */ | |
154 | vm_id->pd_gpu_addr = ~0ll; | |
155 | ||
156 | /* skip over VMID 0, since it is the system VM */ | |
157 | for (i = 1; i < adev->vm_manager.nvm; ++i) { | |
158 | struct amdgpu_fence *fence = adev->vm_manager.active[i]; | |
159 | ||
160 | if (fence == NULL) { | |
161 | /* found a free one */ | |
162 | vm_id->id = i; | |
163 | trace_amdgpu_vm_grab_id(i, ring->idx); | |
164 | return NULL; | |
165 | } | |
166 | ||
167 | if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) { | |
168 | best[fence->ring->idx] = fence; | |
169 | choices[fence->ring == ring ? 0 : 1] = i; | |
170 | } | |
171 | } | |
172 | ||
173 | for (i = 0; i < 2; ++i) { | |
174 | if (choices[i]) { | |
175 | vm_id->id = choices[i]; | |
176 | trace_amdgpu_vm_grab_id(choices[i], ring->idx); | |
177 | return adev->vm_manager.active[choices[i]]; | |
178 | } | |
179 | } | |
180 | ||
181 | /* should never happen */ | |
182 | BUG(); | |
183 | return NULL; | |
184 | } | |
185 | ||
186 | /** | |
187 | * amdgpu_vm_flush - hardware flush the vm | |
188 | * | |
189 | * @ring: ring to use for flush | |
190 | * @vm: vm we want to flush | |
191 | * @updates: last vm update that we waited for | |
192 | * | |
193 | * Flush the vm (cayman+). | |
194 | * | |
195 | * Global and local mutex must be locked! | |
196 | */ | |
197 | void amdgpu_vm_flush(struct amdgpu_ring *ring, | |
198 | struct amdgpu_vm *vm, | |
199 | struct amdgpu_fence *updates) | |
200 | { | |
201 | uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); | |
202 | struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; | |
203 | ||
204 | if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates || | |
205 | amdgpu_fence_is_earlier(vm_id->flushed_updates, updates)) { | |
206 | ||
207 | trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); | |
208 | amdgpu_fence_unref(&vm_id->flushed_updates); | |
209 | vm_id->flushed_updates = amdgpu_fence_ref(updates); | |
210 | vm_id->pd_gpu_addr = pd_addr; | |
211 | amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); | |
212 | } | |
213 | } | |
214 | ||
215 | /** | |
216 | * amdgpu_vm_fence - remember fence for vm | |
217 | * | |
218 | * @adev: amdgpu_device pointer | |
219 | * @vm: vm we want to fence | |
220 | * @fence: fence to remember | |
221 | * | |
222 | * Fence the vm (cayman+). | |
223 | * Set the fence used to protect page table and id. | |
224 | * | |
225 | * Global and local mutex must be locked! | |
226 | */ | |
227 | void amdgpu_vm_fence(struct amdgpu_device *adev, | |
228 | struct amdgpu_vm *vm, | |
229 | struct amdgpu_fence *fence) | |
230 | { | |
231 | unsigned ridx = fence->ring->idx; | |
232 | unsigned vm_id = vm->ids[ridx].id; | |
233 | ||
234 | amdgpu_fence_unref(&adev->vm_manager.active[vm_id]); | |
235 | adev->vm_manager.active[vm_id] = amdgpu_fence_ref(fence); | |
236 | ||
237 | amdgpu_fence_unref(&vm->ids[ridx].last_id_use); | |
238 | vm->ids[ridx].last_id_use = amdgpu_fence_ref(fence); | |
239 | } | |
240 | ||
241 | /** | |
242 | * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo | |
243 | * | |
244 | * @vm: requested vm | |
245 | * @bo: requested buffer object | |
246 | * | |
247 | * Find @bo inside the requested vm (cayman+). | |
248 | * Search inside the @bos vm list for the requested vm | |
249 | * Returns the found bo_va or NULL if none is found | |
250 | * | |
251 | * Object has to be reserved! | |
252 | */ | |
253 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | |
254 | struct amdgpu_bo *bo) | |
255 | { | |
256 | struct amdgpu_bo_va *bo_va; | |
257 | ||
258 | list_for_each_entry(bo_va, &bo->va, bo_list) { | |
259 | if (bo_va->vm == vm) { | |
260 | return bo_va; | |
261 | } | |
262 | } | |
263 | return NULL; | |
264 | } | |
265 | ||
266 | /** | |
267 | * amdgpu_vm_update_pages - helper to call the right asic function | |
268 | * | |
269 | * @adev: amdgpu_device pointer | |
270 | * @ib: indirect buffer to fill with commands | |
271 | * @pe: addr of the page entry | |
272 | * @addr: dst addr to write into pe | |
273 | * @count: number of page entries to update | |
274 | * @incr: increase next addr by incr bytes | |
275 | * @flags: hw access flags | |
276 | * @gtt_flags: GTT hw access flags | |
277 | * | |
278 | * Traces the parameters and calls the right asic functions | |
279 | * to setup the page table using the DMA. | |
280 | */ | |
281 | static void amdgpu_vm_update_pages(struct amdgpu_device *adev, | |
282 | struct amdgpu_ib *ib, | |
283 | uint64_t pe, uint64_t addr, | |
284 | unsigned count, uint32_t incr, | |
285 | uint32_t flags, uint32_t gtt_flags) | |
286 | { | |
287 | trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); | |
288 | ||
289 | if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) { | |
290 | uint64_t src = adev->gart.table_addr + (addr >> 12) * 8; | |
291 | amdgpu_vm_copy_pte(adev, ib, pe, src, count); | |
292 | ||
293 | } else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) { | |
294 | amdgpu_vm_write_pte(adev, ib, pe, addr, | |
295 | count, incr, flags); | |
296 | ||
297 | } else { | |
298 | amdgpu_vm_set_pte_pde(adev, ib, pe, addr, | |
299 | count, incr, flags); | |
300 | } | |
301 | } | |
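
/*
 * Editor's note on the three paths above: a GTT-backed mapping whose
 * @flags match @gtt_flags can be filled by copying the PTEs straight
 * out of the GART table, e.g. addr = 0x10000 reads 8-byte entries
 * starting at gart.table_addr + (0x10000 >> 12) * 8. Other system
 * memory mappings (and tiny updates of fewer than 3 entries) use
 * write-data packets, while large VRAM ranges use the set_pte_pde
 * path.
 */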
302 | ||
303 | /** | |
304 | * amdgpu_vm_clear_bo - initially clear the page dir/table | |
305 | * | |
306 | * @adev: amdgpu_device pointer | |
307 | * @bo: bo to clear | |
308 | */ | |
309 | static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |
310 | struct amdgpu_bo *bo) | |
311 | { | |
312 | struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; | |
313 | struct amdgpu_ib ib; | |
314 | unsigned entries; | |
315 | uint64_t addr; | |
316 | int r; | |
317 | ||
318 | r = amdgpu_bo_reserve(bo, false); | |
319 | if (r) | |
320 | return r; | |
321 | ||
322 | r = reservation_object_reserve_shared(bo->tbo.resv); | |
323 | if (r) | |
324 | return r; | |
325 | ||
326 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | |
327 | if (r) | |
328 | goto error_unreserve; | |
329 | ||
330 | addr = amdgpu_bo_gpu_offset(bo); | |
331 | entries = amdgpu_bo_size(bo) / 8; | |
332 | ||
333 | r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib); | |
334 | if (r) | |
335 | goto error_unreserve; | |
336 | ||
337 | ib.length_dw = 0; | |
338 | ||
339 | amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0); | |
340 | amdgpu_vm_pad_ib(adev, &ib); | |
341 | WARN_ON(ib.length_dw > 64); | |
342 | ||
343 | r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM); | |
344 | if (r) | |
345 | goto error_free; | |
346 | ||
347 | amdgpu_bo_fence(bo, ib.fence, false); | |
348 | ||
349 | error_free: | |
350 | amdgpu_ib_free(adev, &ib); | |
351 | ||
352 | error_unreserve: | |
353 | amdgpu_bo_unreserve(bo); | |
354 | return r; | |
355 | } | |
356 | ||
357 | /** | |
358 | * amdgpu_vm_map_gart - get the physical address of a gart page | |
359 | * | |
360 | * @adev: amdgpu_device pointer | |
361 | * @addr: the unmapped addr | |
362 | * | |
363 | * Look up the physical address of the page that the pte resolves | |
364 | * to (cayman+). | |
365 | * Returns the physical address of the page. | |
366 | */ | |
367 | uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr) | |
368 | { | |
369 | uint64_t result; | |
370 | ||
371 | /* page table offset */ | |
372 | result = adev->gart.pages_addr[addr >> PAGE_SHIFT]; | |
373 | ||
374 | /* in case cpu page size != gpu page size*/ | |
375 | result |= addr & (~PAGE_MASK); | |
376 | ||
377 | return result; | |
378 | } | |
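
/*
 * Worked example (editor's sketch, assuming 4KB CPU pages, i.e.
 * PAGE_SHIFT = 12): for addr = 0x12345678,
 *
 *	result  = adev->gart.pages_addr[0x12345];  // 0x12345678 >> 12
 *	result |= 0x678;                           // 0x12345678 & ~PAGE_MASK
 *
 * i.e. the returned address points 0x678 bytes into the physical page
 * backing GART page 0x12345.
 */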
379 | ||
380 | /** | |
381 | * amdgpu_vm_update_pdes - make sure that page directory is valid | |
382 | * | |
383 | * @adev: amdgpu_device pointer | |
384 | * @vm: requested vm | |
385 | * @start: start of GPU address range | |
386 | * @end: end of GPU address range | |
387 | * | |
388 | * Allocates new page tables if necessary | |
389 | * and updates the page directory (cayman+). | |
390 | * Returns 0 for success, error for failure. | |
391 | * | |
392 | * Global and local mutex must be locked! | |
393 | */ | |
394 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | |
395 | struct amdgpu_vm *vm) | |
396 | { | |
397 | struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; | |
398 | struct amdgpu_bo *pd = vm->page_directory; | |
399 | uint64_t pd_addr = amdgpu_bo_gpu_offset(pd); | |
400 | uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; | |
401 | uint64_t last_pde = ~0, last_pt = ~0; | |
402 | unsigned count = 0, pt_idx, ndw; | |
403 | struct amdgpu_ib ib; | |
404 | int r; | |
405 | ||
406 | /* padding, etc. */ | |
407 | ndw = 64; | |
408 | ||
409 | /* assume the worst case */ | |
410 | ndw += vm->max_pde_used * 6; | |
411 | ||
412 | /* update too big for an IB */ | |
413 | if (ndw > 0xfffff) | |
414 | return -ENOMEM; | |
415 | ||
416 | r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib); | |
417 | if (r) | |
418 | return r; | |
419 | ib.length_dw = 0; | |
420 | ||
421 | /* walk over the address space and update the page directory */ | |
422 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { | |
423 | struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo; | |
424 | uint64_t pde, pt; | |
425 | ||
426 | if (bo == NULL) | |
427 | continue; | |
428 | ||
429 | pt = amdgpu_bo_gpu_offset(bo); | |
430 | if (vm->page_tables[pt_idx].addr == pt) | |
431 | continue; | |
432 | vm->page_tables[pt_idx].addr = pt; | |
433 | ||
434 | pde = pd_addr + pt_idx * 8; | |
435 | if (((last_pde + 8 * count) != pde) || | |
436 | ((last_pt + incr * count) != pt)) { | |
437 | ||
438 | if (count) { | |
439 | amdgpu_vm_update_pages(adev, &ib, last_pde, | |
440 | last_pt, count, incr, | |
441 | AMDGPU_PTE_VALID, 0); | |
442 | } | |
443 | ||
444 | count = 1; | |
445 | last_pde = pde; | |
446 | last_pt = pt; | |
447 | } else { | |
448 | ++count; | |
449 | } | |
450 | } | |
451 | ||
452 | if (count) | |
453 | amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count, | |
454 | incr, AMDGPU_PTE_VALID, 0); | |
455 | ||
456 | if (ib.length_dw != 0) { | |
457 | amdgpu_vm_pad_ib(adev, &ib); | |
458 | amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); | |
459 | WARN_ON(ib.length_dw > ndw); | |
460 | r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM); | |
461 | if (r) { | |
462 | amdgpu_ib_free(adev, &ib); | |
463 | return r; | |
464 | } | |
465 | amdgpu_bo_fence(pd, ib.fence, false); | |
466 | } | |
467 | amdgpu_ib_free(adev, &ib); | |
468 | ||
469 | return 0; | |
470 | } | |
471 | ||
472 | /** | |
473 | * amdgpu_vm_frag_ptes - add fragment information to PTEs | |
474 | * | |
475 | * @adev: amdgpu_device pointer | |
476 | * @ib: IB for the update | |
477 | * @pe_start: first PTE to handle | |
478 | * @pe_end: last PTE to handle | |
479 | * @addr: addr those PTEs should point to | |
480 | * @flags: hw mapping flags | |
481 | * @gtt_flags: GTT hw mapping flags | |
482 | * | |
483 | * Global and local mutex must be locked! | |
484 | */ | |
485 | static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, | |
486 | struct amdgpu_ib *ib, | |
487 | uint64_t pe_start, uint64_t pe_end, | |
488 | uint64_t addr, uint32_t flags, | |
489 | uint32_t gtt_flags) | |
490 | { | |
491 | /** | |
492 | * The MC L1 TLB supports variable sized pages, based on a fragment | |
493 | * field in the PTE. When this field is set to a non-zero value, page | |
494 | * granularity is increased from 4KB to (1 << (12 + frag)). The PTE | |
495 | * flags are considered valid for all PTEs within the fragment range | |
496 | * and corresponding mappings are assumed to be physically contiguous. | |
497 | * | |
498 | * The L1 TLB can store a single PTE for the whole fragment, | |
499 | * significantly increasing the space available for translation | |
500 | * caching. This leads to large improvements in throughput when the | |
501 | * TLB is under pressure. | |
502 | * | |
503 | * The L2 TLB distributes small and large fragments into two | |
504 | * asymmetric partitions. The large fragment cache is significantly | |
505 | * larger. Thus, we try to use large fragments wherever possible. | |
506 | * Userspace can support this by aligning virtual base address and | |
507 | * allocation size to the fragment size. | |
508 | */ | |
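
	/*
	 * Worked example (editor's sketch): frag_align below is 0x80
	 * bytes, i.e. 16 PTEs of 8 bytes each, which at 4KB per page is
	 * exactly the 64KB advertised by AMDGPU_PTE_FRAG_64KB. For
	 * pe_start = 0x1048 and pe_end = 0x1248:
	 *
	 *	frag_start = ALIGN(0x1048, 0x80) = 0x1080
	 *	frag_end   = 0x1248 & ~0x7f      = 0x1200
	 *
	 * so 7 PTEs are written 4KB-style at the head, 48 PTEs (three
	 * 64KB fragments) in the middle and 9 PTEs at the tail.
	 */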
509 | ||
510 | /* SI and newer are optimized for 64KB */ | |
511 | uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB; | |
512 | uint64_t frag_align = 0x80; | |
513 | ||
514 | uint64_t frag_start = ALIGN(pe_start, frag_align); | |
515 | uint64_t frag_end = pe_end & ~(frag_align - 1); | |
516 | ||
517 | unsigned count; | |
518 | ||
519 | /* system pages are non continuously */ | |
520 | if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) || | |
521 | (frag_start >= frag_end)) { | |
522 | ||
523 | count = (pe_end - pe_start) / 8; | |
524 | amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, | |
525 | AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); | |
526 | return; | |
527 | } | |
528 | ||
529 | /* handle the 4K area at the beginning */ | |
530 | if (pe_start != frag_start) { | |
531 | count = (frag_start - pe_start) / 8; | |
532 | amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, | |
533 | AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); | |
534 | addr += AMDGPU_GPU_PAGE_SIZE * count; | |
535 | } | |
536 | ||
537 | /* handle the area in the middle */ | |
538 | count = (frag_end - frag_start) / 8; | |
539 | amdgpu_vm_update_pages(adev, ib, frag_start, addr, count, | |
540 | AMDGPU_GPU_PAGE_SIZE, flags | frag_flags, | |
541 | gtt_flags); | |
542 | ||
543 | /* handle the 4K area at the end */ | |
544 | if (frag_end != pe_end) { | |
545 | addr += AMDGPU_GPU_PAGE_SIZE * count; | |
546 | count = (pe_end - frag_end) / 8; | |
547 | amdgpu_vm_update_pages(adev, ib, frag_end, addr, count, | |
548 | AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags); | |
549 | } | |
550 | } | |
551 | ||
552 | /** | |
553 | * amdgpu_vm_update_ptes - make sure that page tables are valid | |
554 | * | |
555 | * @adev: amdgpu_device pointer | |
556 | * @vm: requested vm | |
557 | * @start: start of GPU address range | |
558 | * @end: end of GPU address range | |
559 | * @dst: destination address to map to | |
560 | * @flags: mapping flags | |
561 | * | |
562 | * Update the page tables in the range @start - @end (cayman+). | |
563 | * | |
564 | * Global and local mutex must be locked! | |
565 | */ | |
566 | static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, | |
567 | struct amdgpu_vm *vm, | |
568 | struct amdgpu_ib *ib, | |
569 | uint64_t start, uint64_t end, | |
570 | uint64_t dst, uint32_t flags, | |
571 | uint32_t gtt_flags) | |
572 | { | |
573 | uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; | |
574 | uint64_t last_pte = ~0, last_dst = ~0; | |
575 | unsigned count = 0; | |
576 | uint64_t addr; | |
577 | ||
578 | /* walk over the address space and update the page tables */ | |
579 | for (addr = start; addr < end; ) { | |
580 | uint64_t pt_idx = addr >> amdgpu_vm_block_size; | |
581 | struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo; | |
582 | unsigned nptes; | |
583 | uint64_t pte; | |
584 | int r; | |
585 | ||
586 | amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, | |
587 | AMDGPU_FENCE_OWNER_VM); | |
588 | r = reservation_object_reserve_shared(pt->tbo.resv); | |
589 | if (r) | |
590 | return r; | |
591 | ||
592 | if ((addr & ~mask) == (end & ~mask)) | |
593 | nptes = end - addr; | |
594 | else | |
595 | nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); | |
596 | ||
597 | pte = amdgpu_bo_gpu_offset(pt); | |
598 | pte += (addr & mask) * 8; | |
599 | ||
600 | if ((last_pte + 8 * count) != pte) { | |
601 | ||
602 | if (count) { | |
603 | amdgpu_vm_frag_ptes(adev, ib, last_pte, | |
604 | last_pte + 8 * count, | |
605 | last_dst, flags, | |
606 | gtt_flags); | |
607 | } | |
608 | ||
609 | count = nptes; | |
610 | last_pte = pte; | |
611 | last_dst = dst; | |
612 | } else { | |
613 | count += nptes; | |
614 | } | |
615 | ||
616 | addr += nptes; | |
617 | dst += nptes * AMDGPU_GPU_PAGE_SIZE; | |
618 | } | |
619 | ||
620 | if (count) { | |
621 | amdgpu_vm_frag_ptes(adev, ib, last_pte, | |
622 | last_pte + 8 * count, | |
623 | last_dst, flags, gtt_flags); | |
624 | } | |
625 | ||
626 | return 0; | |
627 | } | |
628 | ||
629 | /** | |
630 | * amdgpu_vm_fence_pts - fence page tables after an update | |
631 | * | |
632 | * @vm: requested vm | |
633 | * @start: start of GPU address range | |
634 | * @end: end of GPU address range | |
635 | * @fence: fence to use | |
636 | * | |
637 | * Fence the page tables in the range @start - @end (cayman+). | |
638 | * | |
639 | * Global and local mutex must be locked! | |
640 | */ | |
641 | static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm, | |
642 | uint64_t start, uint64_t end, | |
643 | struct amdgpu_fence *fence) | |
644 | { | |
645 | unsigned i; | |
646 | ||
647 | start >>= amdgpu_vm_block_size; | |
648 | end >>= amdgpu_vm_block_size; | |
649 | ||
650 | for (i = start; i <= end; ++i) | |
651 | amdgpu_bo_fence(vm->page_tables[i].bo, fence, true); | |
652 | } | |
653 | ||
654 | /** | |
655 | * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table | |
656 | * | |
657 | * @adev: amdgpu_device pointer | |
658 | * @vm: requested vm | |
659 | * @mapping: mapped range and flags to use for the update | |
660 | * @addr: addr to set the area to | |
661 | * @gtt_flags: flags as they are used for GTT | |
662 | * @fence: optional resulting fence | |
663 | * | |
664 | * Fill in the page table entries for @mapping. | |
665 | * Returns 0 for success, -EINVAL for failure. | |
666 | * | |
667 | * Object have to be reserved and mutex must be locked! | |
668 | */ | |
669 | static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | |
670 | struct amdgpu_vm *vm, | |
671 | struct amdgpu_bo_va_mapping *mapping, | |
672 | uint64_t addr, uint32_t gtt_flags, | |
673 | struct amdgpu_fence **fence) | |
674 | { | |
675 | struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; | |
676 | unsigned nptes, ncmds, ndw; | |
677 | uint32_t flags = gtt_flags; | |
678 | struct amdgpu_ib ib; | |
679 | int r; | |
680 | ||
681 | /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here | |
682 | * but in case of something, we filter the flags in first place | |
683 | */ | |
684 | if (!(mapping->flags & AMDGPU_PTE_READABLE)) | |
685 | flags &= ~AMDGPU_PTE_READABLE; | |
686 | if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) | |
687 | flags &= ~AMDGPU_PTE_WRITEABLE; | |
688 | ||
689 | trace_amdgpu_vm_bo_update(mapping); | |
690 | ||
691 | nptes = mapping->it.last - mapping->it.start + 1; | |
692 | ||
693 | /* | |
694 | * reserve space for one command every (1 << BLOCK_SIZE) | |
695 | * entries or 2k dwords (whatever is smaller) | |
696 | */ | |
697 | ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1; | |
698 | ||
699 | /* padding, etc. */ | |
700 | ndw = 64; | |
701 | ||
702 | if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) { | |
703 | /* only copy commands needed */ | |
704 | ndw += ncmds * 7; | |
705 | ||
706 | } else if (flags & AMDGPU_PTE_SYSTEM) { | |
707 | /* header for write data commands */ | |
708 | ndw += ncmds * 4; | |
709 | ||
710 | /* body of write data command */ | |
711 | ndw += nptes * 2; | |
712 | ||
713 | } else { | |
714 | /* set page commands needed */ | |
715 | ndw += ncmds * 10; | |
716 | ||
717 | /* two extra commands for begin/end of fragment */ | |
718 | ndw += 2 * 10; | |
719 | } | |
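
	/*
	 * Sizing example (editor's sketch, assuming amdgpu_vm_block_size
	 * = 9): mapping 0x100000 pages gives ncmds = (0x100000 >> 9) + 1
	 * = 2049. The GART copy path above then needs ndw = 64 + 2049 * 7
	 * ~= 14k dwords, the write-data path 64 + 2049 * 4 + 0x100000 * 2
	 * dwords (over 2M, rejected by the 0xfffff check below), and the
	 * set-page path 64 + 2049 * 10 + 20 dwords.
	 */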
720 | ||
721 | /* update too big for an IB */ | |
722 | if (ndw > 0xfffff) | |
723 | return -ENOMEM; | |
724 | ||
725 | r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib); | |
726 | if (r) | |
727 | return r; | |
728 | ib.length_dw = 0; | |
729 | ||
730 | if (!(flags & AMDGPU_PTE_VALID)) { | |
731 | unsigned i; | |
732 | ||
733 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | |
734 | struct amdgpu_fence *f = vm->ids[i].last_id_use; | |
735 | amdgpu_sync_fence(&ib.sync, f); | |
736 | } | |
737 | } | |
738 | ||
739 | r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start, | |
740 | mapping->it.last + 1, addr + mapping->offset, | |
741 | flags, gtt_flags); | |
742 | ||
743 | if (r) { | |
744 | amdgpu_ib_free(adev, &ib); | |
745 | return r; | |
746 | } | |
747 | ||
748 | amdgpu_vm_pad_ib(adev, &ib); | |
749 | WARN_ON(ib.length_dw > ndw); | |
750 | ||
751 | r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM); | |
752 | if (r) { | |
753 | amdgpu_ib_free(adev, &ib); | |
754 | return r; | |
755 | } | |
756 | amdgpu_vm_fence_pts(vm, mapping->it.start, | |
757 | mapping->it.last + 1, ib.fence); | |
758 | if (fence) { | |
759 | amdgpu_fence_unref(fence); | |
760 | *fence = amdgpu_fence_ref(ib.fence); | |
761 | } | |
762 | amdgpu_ib_free(adev, &ib); | |
763 | ||
764 | return 0; | |
765 | } | |
766 | ||
767 | /** | |
768 | * amdgpu_vm_bo_update - update all BO mappings in the vm page table | |
769 | * | |
770 | * @adev: amdgpu_device pointer | |
771 | * @bo_va: requested BO and VM object | |
772 | * @mem: ttm mem | |
773 | * | |
774 | * Fill in the page table entries for @bo_va. | |
775 | * Returns 0 for success, -EINVAL for failure. | |
776 | * | |
777 | * Object have to be reserved and mutex must be locked! | |
778 | */ | |
779 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | |
780 | struct amdgpu_bo_va *bo_va, | |
781 | struct ttm_mem_reg *mem) | |
782 | { | |
783 | struct amdgpu_vm *vm = bo_va->vm; | |
784 | struct amdgpu_bo_va_mapping *mapping; | |
785 | uint32_t flags; | |
786 | uint64_t addr; | |
787 | int r; | |
788 | ||
789 | if (mem) { | |
790 | addr = mem->start << PAGE_SHIFT; | |
791 | if (mem->mem_type != TTM_PL_TT) | |
792 | addr += adev->vm_manager.vram_base_offset; | |
793 | } else { | |
794 | addr = 0; | |
795 | } | |
796 | ||
797 | if (addr == bo_va->addr) | |
798 | return 0; | |
799 | ||
800 | flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); | |
801 | ||
802 | list_for_each_entry(mapping, &bo_va->mappings, list) { | |
803 | r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr, | |
804 | flags, &bo_va->last_pt_update); | |
805 | if (r) | |
806 | return r; | |
807 | } | |
808 | ||
809 | bo_va->addr = addr; | |
810 | spin_lock(&vm->status_lock); | |
811 | list_del_init(&bo_va->vm_status); | |
812 | spin_unlock(&vm->status_lock); | |
813 | ||
814 | return 0; | |
815 | } | |
816 | ||
817 | /** | |
818 | * amdgpu_vm_clear_freed - clear freed BOs in the PT | |
819 | * | |
820 | * @adev: amdgpu_device pointer | |
821 | * @vm: requested vm | |
822 | * | |
823 | * Make sure all freed BOs are cleared in the PT. | |
824 | * Returns 0 for success. | |
825 | * | |
826 | * PTs have to be reserved and mutex must be locked! | |
827 | */ | |
828 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | |
829 | struct amdgpu_vm *vm) | |
830 | { | |
831 | struct amdgpu_bo_va_mapping *mapping; | |
832 | int r; | |
833 | ||
834 | while (!list_empty(&vm->freed)) { | |
835 | mapping = list_first_entry(&vm->freed, | |
836 | struct amdgpu_bo_va_mapping, list); | |
837 | list_del(&mapping->list); | |
838 | ||
839 | r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); | |
840 | kfree(mapping); | |
841 | if (r) | |
842 | return r; | |
843 | ||
844 | } | |
845 | return 0; | |
846 | ||
847 | } | |
848 | ||
849 | /** | |
850 | * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT | |
851 | * | |
852 | * @adev: amdgpu_device pointer | |
853 | * @vm: requested vm | |
854 | * | |
855 | * Make sure all invalidated BOs are cleared in the PT. | |
856 | * Returns 0 for success. | |
857 | * | |
858 | * PTs have to be reserved and mutex must be locked! | |
859 | */ | |
860 | int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, | |
861 | struct amdgpu_vm *vm) | |
862 | { | |
863 | struct amdgpu_bo_va *bo_va; | |
864 | int r; | |
865 | ||
866 | spin_lock(&vm->status_lock); | |
867 | while (!list_empty(&vm->invalidated)) { | |
868 | bo_va = list_first_entry(&vm->invalidated, | |
869 | struct amdgpu_bo_va, vm_status); | |
870 | spin_unlock(&vm->status_lock); | |
871 | ||
872 | r = amdgpu_vm_bo_update(adev, bo_va, NULL); | |
873 | if (r) | |
874 | return r; | |
875 | ||
876 | spin_lock(&vm->status_lock); | |
877 | } | |
878 | spin_unlock(&vm->status_lock); | |
879 | ||
880 | return 0; | |
881 | } | |
882 | ||
883 | /** | |
884 | * amdgpu_vm_bo_add - add a bo to a specific vm | |
885 | * | |
886 | * @adev: amdgpu_device pointer | |
887 | * @vm: requested vm | |
888 | * @bo: amdgpu buffer object | |
889 | * | |
890 | * Add @bo into the requested vm (cayman+). | |
891 | * Add @bo to the list of bos associated with the vm | |
892 | * Returns newly added bo_va or NULL for failure | |
893 | * | |
894 | * Object has to be reserved! | |
895 | */ | |
896 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | |
897 | struct amdgpu_vm *vm, | |
898 | struct amdgpu_bo *bo) | |
899 | { | |
900 | struct amdgpu_bo_va *bo_va; | |
901 | ||
902 | bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL); | |
903 | if (bo_va == NULL) { | |
904 | return NULL; | |
905 | } | |
906 | bo_va->vm = vm; | |
907 | bo_va->bo = bo; | |
908 | bo_va->addr = 0; | |
909 | bo_va->ref_count = 1; | |
910 | INIT_LIST_HEAD(&bo_va->bo_list); | |
911 | INIT_LIST_HEAD(&bo_va->mappings); | |
912 | INIT_LIST_HEAD(&bo_va->vm_status); | |
913 | ||
914 | mutex_lock(&vm->mutex); | |
915 | list_add_tail(&bo_va->bo_list, &bo->va); | |
916 | mutex_unlock(&vm->mutex); | |
917 | ||
918 | return bo_va; | |
919 | } | |
920 | ||
921 | /** | |
922 | * amdgpu_vm_bo_map - map bo inside a vm | |
923 | * | |
924 | * @adev: amdgpu_device pointer | |
925 | * @bo_va: bo_va to store the address | |
926 | * @saddr: where to map the BO | |
927 | * @offset: requested offset in the BO | |
928 | * @flags: attributes of pages (read/write/valid/etc.) | |
929 | * | |
930 | * Add a mapping of the BO at the specefied addr into the VM. | |
931 | * Returns 0 for success, error for failure. | |
932 | * | |
933 | * Object has to be reserved and gets unreserved by this function! | |
934 | */ | |
935 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |
936 | struct amdgpu_bo_va *bo_va, | |
937 | uint64_t saddr, uint64_t offset, | |
938 | uint64_t size, uint32_t flags) | |
939 | { | |
940 | struct amdgpu_bo_va_mapping *mapping; | |
941 | struct amdgpu_vm *vm = bo_va->vm; | |
942 | struct interval_tree_node *it; | |
943 | unsigned last_pfn, pt_idx; | |
944 | uint64_t eaddr; | |
945 | int r; | |
946 | ||
947 | /* validate the parameters */ | |
948 | if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || | |
949 | size == 0 || size & AMDGPU_GPU_PAGE_MASK) { | |
950 | amdgpu_bo_unreserve(bo_va->bo); | |
951 | return -EINVAL; | |
952 | } | |
953 | ||
954 | /* make sure object fit at this offset */ | |
955 | eaddr = saddr + size; | |
956 | if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) { | |
957 | amdgpu_bo_unreserve(bo_va->bo); | |
958 | return -EINVAL; | |
959 | } | |
960 | ||
961 | last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; | |
962 | if (last_pfn > adev->vm_manager.max_pfn) { | |
963 | dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", | |
964 | last_pfn, adev->vm_manager.max_pfn); | |
965 | amdgpu_bo_unreserve(bo_va->bo); | |
966 | return -EINVAL; | |
967 | } | |
968 | ||
969 | mutex_lock(&vm->mutex); | |
970 | ||
971 | saddr /= AMDGPU_GPU_PAGE_SIZE; | |
972 | eaddr /= AMDGPU_GPU_PAGE_SIZE; | |
973 | ||
974 | it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); | |
975 | if (it) { | |
976 | struct amdgpu_bo_va_mapping *tmp; | |
977 | tmp = container_of(it, struct amdgpu_bo_va_mapping, it); | |
978 | /* bo and tmp overlap, invalid addr */ | |
979 | dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " | |
980 | "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, | |
981 | tmp->it.start, tmp->it.last + 1); | |
982 | amdgpu_bo_unreserve(bo_va->bo); | |
983 | r = -EINVAL; | |
984 | goto error_unlock; | |
985 | } | |
986 | ||
987 | mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); | |
988 | if (!mapping) { | |
989 | amdgpu_bo_unreserve(bo_va->bo); | |
990 | r = -ENOMEM; | |
991 | goto error_unlock; | |
992 | } | |
993 | ||
994 | INIT_LIST_HEAD(&mapping->list); | |
995 | mapping->it.start = saddr; | |
996 | mapping->it.last = eaddr - 1; | |
997 | mapping->offset = offset; | |
998 | mapping->flags = flags; | |
999 | ||
1000 | list_add(&mapping->list, &bo_va->mappings); | |
1001 | interval_tree_insert(&mapping->it, &vm->va); | |
1002 | ||
1003 | /* Make sure the page tables are allocated */ | |
1004 | saddr >>= amdgpu_vm_block_size; | |
1005 | eaddr >>= amdgpu_vm_block_size; | |
1006 | ||
1007 | BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev)); | |
1008 | ||
1009 | if (eaddr > vm->max_pde_used) | |
1010 | vm->max_pde_used = eaddr; | |
1011 | ||
1012 | amdgpu_bo_unreserve(bo_va->bo); | |
1013 | ||
1014 | /* walk over the address space and allocate the page tables */ | |
1015 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { | |
1016 | struct amdgpu_bo *pt; | |
1017 | ||
1018 | if (vm->page_tables[pt_idx].bo) | |
1019 | continue; | |
1020 | ||
1021 | /* drop mutex to allocate and clear page table */ | |
1022 | mutex_unlock(&vm->mutex); | |
1023 | ||
1024 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, | |
1025 | AMDGPU_GPU_PAGE_SIZE, true, | |
1026 | AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt); | |
1027 | if (r) | |
1028 | goto error_free; | |
1029 | ||
1030 | r = amdgpu_vm_clear_bo(adev, pt); | |
1031 | if (r) { | |
1032 | amdgpu_bo_unref(&pt); | |
1033 | goto error_free; | |
1034 | } | |
1035 | ||
1036 | /* aquire mutex again */ | |
1037 | mutex_lock(&vm->mutex); | |
1038 | if (vm->page_tables[pt_idx].bo) { | |
1039 | /* someone else allocated the pt in the meantime */ | |
1040 | mutex_unlock(&vm->mutex); | |
1041 | amdgpu_bo_unref(&pt); | |
1042 | mutex_lock(&vm->mutex); | |
1043 | continue; | |
1044 | } | |
1045 | ||
1046 | vm->page_tables[pt_idx].addr = 0; | |
1047 | vm->page_tables[pt_idx].bo = pt; | |
1048 | } | |
1049 | ||
1050 | mutex_unlock(&vm->mutex); | |
1051 | return 0; | |
1052 | ||
1053 | error_free: | |
1054 | mutex_lock(&vm->mutex); | |
1055 | list_del(&mapping->list); | |
1056 | interval_tree_remove(&mapping->it, &vm->va); | |
1057 | kfree(mapping); | |
1058 | ||
1059 | error_unlock: | |
1060 | mutex_unlock(&vm->mutex); | |
1061 | return r; | |
1062 | } | |
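
/*
 * Usage sketch (editor's addition; the surrounding GEM code is an
 * assumption, only the amdgpu_vm_* and amdgpu_bo_* calls are from this
 * driver): mapping a whole BO read/write at GPU address 0x100000:
 *
 *	struct amdgpu_bo_va *bo_va;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	if (!bo_va) {
 *		amdgpu_bo_unreserve(bo);
 *		return -ENOMEM;
 *	}
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *
 * Note that amdgpu_vm_bo_map() drops the reservation on every path, so
 * no amdgpu_bo_unreserve() is needed after it.
 */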
1063 | ||
1064 | /** | |
1065 | * amdgpu_vm_bo_unmap - remove bo mapping from vm | |
1066 | * | |
1067 | * @adev: amdgpu_device pointer | |
1068 | * @bo_va: bo_va to remove the address from | |
1069 | * @saddr: where to the BO is mapped | |
1070 | * | |
1071 | * Remove a mapping of the BO at the specefied addr from the VM. | |
1072 | * Returns 0 for success, error for failure. | |
1073 | * | |
1074 | * Object has to be reserved and gets unreserved by this function! | |
1075 | */ | |
1076 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | |
1077 | struct amdgpu_bo_va *bo_va, | |
1078 | uint64_t saddr) | |
1079 | { | |
1080 | struct amdgpu_bo_va_mapping *mapping; | |
1081 | struct amdgpu_vm *vm = bo_va->vm; | |
1082 | ||
1083 | list_for_each_entry(mapping, &bo_va->mappings, list) { | |
1084 | if (mapping->it.start == saddr) | |
1085 | break; | |
1086 | } | |
1087 | ||
1088 | if (&mapping->list == &bo_va->mappings) { | |
1089 | amdgpu_bo_unreserve(bo_va->bo); | |
1090 | return -ENOENT; | |
1091 | } | |
1092 | ||
1093 | mutex_lock(&vm->mutex); | |
1094 | list_del(&mapping->list); | |
1095 | interval_tree_remove(&mapping->it, &vm->va); | |
1096 | ||
1097 | if (bo_va->addr) { | |
1098 | /* clear the old address */ | |
1099 | list_add(&mapping->list, &vm->freed); | |
1100 | } else { | |
1101 | kfree(mapping); | |
1102 | } | |
1103 | mutex_unlock(&vm->mutex); | |
1104 | amdgpu_bo_unreserve(bo_va->bo); | |
1105 | ||
1106 | return 0; | |
1107 | } | |
1108 | ||
1109 | /** | |
1110 | * amdgpu_vm_bo_rmv - remove a bo to a specific vm | |
1111 | * | |
1112 | * @adev: amdgpu_device pointer | |
1113 | * @bo_va: requested bo_va | |
1114 | * | |
1115 | * Remove @bo_va->bo from the requested vm (cayman+). | |
1116 | * | |
1117 | * Object have to be reserved! | |
1118 | */ | |
1119 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | |
1120 | struct amdgpu_bo_va *bo_va) | |
1121 | { | |
1122 | struct amdgpu_bo_va_mapping *mapping, *next; | |
1123 | struct amdgpu_vm *vm = bo_va->vm; | |
1124 | ||
1125 | list_del(&bo_va->bo_list); | |
1126 | ||
1127 | mutex_lock(&vm->mutex); | |
1128 | ||
1129 | spin_lock(&vm->status_lock); | |
1130 | list_del(&bo_va->vm_status); | |
1131 | spin_unlock(&vm->status_lock); | |
1132 | ||
1133 | list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) { | |
1134 | list_del(&mapping->list); | |
1135 | interval_tree_remove(&mapping->it, &vm->va); | |
1136 | if (bo_va->addr) | |
1137 | list_add(&mapping->list, &vm->freed); | |
1138 | else | |
1139 | kfree(mapping); | |
1140 | } | |
1141 | amdgpu_fence_unref(&bo_va->last_pt_update); | |
1142 | kfree(bo_va); | |
1143 | ||
1144 | mutex_unlock(&vm->mutex); | |
1145 | } | |
1146 | ||
1147 | /** | |
1148 | * amdgpu_vm_bo_invalidate - mark the bo as invalid | |
1149 | * | |
1150 | * @adev: amdgpu_device pointer | |
1151 | * @vm: requested vm | |
1152 | * @bo: amdgpu buffer object | |
1153 | * | |
1154 | * Mark @bo as invalid (cayman+). | |
1155 | */ | |
1156 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | |
1157 | struct amdgpu_bo *bo) | |
1158 | { | |
1159 | struct amdgpu_bo_va *bo_va; | |
1160 | ||
1161 | list_for_each_entry(bo_va, &bo->va, bo_list) { | |
1162 | if (bo_va->addr) { | |
1163 | spin_lock(&bo_va->vm->status_lock); | |
1164 | list_del(&bo_va->vm_status); | |
1165 | list_add(&bo_va->vm_status, &bo_va->vm->invalidated); | |
1166 | spin_unlock(&bo_va->vm->status_lock); | |
1167 | } | |
1168 | } | |
1169 | } | |
1170 | ||
1171 | /** | |
1172 | * amdgpu_vm_init - initialize a vm instance | |
1173 | * | |
1174 | * @adev: amdgpu_device pointer | |
1175 | * @vm: requested vm | |
1176 | * | |
1177 | * Init @vm fields (cayman+). | |
1178 | */ | |
1179 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |
1180 | { | |
1181 | const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, | |
1182 | AMDGPU_VM_PTE_COUNT * 8); | |
1183 | unsigned pd_size, pd_entries, pts_size; | |
1184 | int i, r; | |
1185 | ||
1186 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | |
1187 | vm->ids[i].id = 0; | |
1188 | vm->ids[i].flushed_updates = NULL; | |
1189 | vm->ids[i].last_id_use = NULL; | |
1190 | } | |
1191 | mutex_init(&vm->mutex); | |
1192 | vm->va = RB_ROOT; | |
1193 | spin_lock_init(&vm->status_lock); | |
1194 | INIT_LIST_HEAD(&vm->invalidated); | |
1195 | INIT_LIST_HEAD(&vm->freed); | |
1196 | ||
1197 | pd_size = amdgpu_vm_directory_size(adev); | |
1198 | pd_entries = amdgpu_vm_num_pdes(adev); | |
1199 | ||
1200 | /* allocate page table array */ | |
1201 | pts_size = pd_entries * sizeof(struct amdgpu_vm_pt); | |
1202 | vm->page_tables = kzalloc(pts_size, GFP_KERNEL); | |
1203 | if (vm->page_tables == NULL) { | |
1204 | DRM_ERROR("Cannot allocate memory for page table array\n"); | |
1205 | return -ENOMEM; | |
1206 | } | |
1207 | ||
1208 | r = amdgpu_bo_create(adev, pd_size, align, true, | |
1209 | AMDGPU_GEM_DOMAIN_VRAM, 0, | |
1210 | NULL, &vm->page_directory); | |
1211 | if (r) | |
1212 | return r; | |
1213 | ||
1214 | r = amdgpu_vm_clear_bo(adev, vm->page_directory); | |
1215 | if (r) { | |
1216 | amdgpu_bo_unref(&vm->page_directory); | |
1217 | vm->page_directory = NULL; | |
1218 | return r; | |
1219 | } | |
1220 | ||
1221 | return 0; | |
1222 | } | |
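
/*
 * Lifecycle sketch (editor's addition): a VM is typically created once
 * per file descriptor and torn down on release; the fpriv container
 * shown here is an assumption for illustration:
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm);
 *	if (r)
 *		return r;
 *	// amdgpu_vm_bo_add()/amdgpu_vm_bo_map() as buffers are mapped,
 *	// amdgpu_vm_grab_id()/amdgpu_vm_flush()/amdgpu_vm_fence() at
 *	// command submission time ...
 *	amdgpu_vm_fini(adev, &fpriv->vm);
 */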
1223 | ||
1224 | /** | |
1225 | * amdgpu_vm_fini - tear down a vm instance | |
1226 | * | |
1227 | * @adev: amdgpu_device pointer | |
1228 | * @vm: requested vm | |
1229 | * | |
1230 | * Tear down @vm (cayman+). | |
1231 | * Unbind the VM and remove all bos from the vm bo list | |
1232 | */ | |
1233 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |
1234 | { | |
1235 | struct amdgpu_bo_va_mapping *mapping, *tmp; | |
1236 | int i; | |
1237 | ||
1238 | if (!RB_EMPTY_ROOT(&vm->va)) { | |
1239 | dev_err(adev->dev, "still active bo inside vm\n"); | |
1240 | } | |
1241 | rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) { | |
1242 | list_del(&mapping->list); | |
1243 | interval_tree_remove(&mapping->it, &vm->va); | |
1244 | kfree(mapping); | |
1245 | } | |
1246 | list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { | |
1247 | list_del(&mapping->list); | |
1248 | kfree(mapping); | |
1249 | } | |
1250 | ||
1251 | for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) | |
1252 | amdgpu_bo_unref(&vm->page_tables[i].bo); | |
1253 | kfree(vm->page_tables); | |
1254 | ||
1255 | amdgpu_bo_unref(&vm->page_directory); | |
1256 | ||
1257 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | |
1258 | amdgpu_fence_unref(&vm->ids[i].flushed_updates); | |
1259 | amdgpu_fence_unref(&vm->ids[i].last_id_use); | |
1260 | } | |
1261 | ||
1262 | mutex_destroy(&vm->mutex); | |
1263 | } |