Commit | Line | Data |
---|---|---|
d38ceaf9 AD |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. | |
3 | * Copyright 2008 Red Hat Inc. | |
4 | * Copyright 2009 Jerome Glisse. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the "Software"), | |
8 | * to deal in the Software without restriction, including without limitation | |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
10 | * and/or sell copies of the Software, and to permit persons to whom the | |
11 | * Software is furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
22 | * OTHER DEALINGS IN THE SOFTWARE. | |
23 | * | |
24 | * Authors: Dave Airlie | |
25 | * Alex Deucher | |
26 | * Jerome Glisse | |
27 | */ | |
f54d1867 | 28 | #include <linux/dma-fence-array.h> |
d38ceaf9 AD |
29 | #include <drm/drmP.h> |
30 | #include <drm/amdgpu_drm.h> | |
31 | #include "amdgpu.h" | |
32 | #include "amdgpu_trace.h" | |
33 | ||
34 | /* | |
35 | * GPUVM | |
36 | * GPUVM is similar to the legacy GART on older ASICs; however, | |
37 | * rather than there being a single global GART table | |
38 | * for the entire GPU, there are multiple VM page tables active | |
39 | * at any given time. The VM page tables can contain a mix of | |
40 | * vram pages and system memory pages. System memory pages | |
41 | * can be mapped as snooped (cached system pages) or unsnooped | |
42 | * (uncached system pages). | |
43 | * Each VM has an ID associated with it and there is a page table | |
44 | * associated with each VMID. When executing a command buffer, | |
45 | * the kernel tells the ring what VMID to use for that command | |
46 | * buffer. VMIDs are allocated dynamically as commands are submitted. | |
47 | * The userspace drivers maintain their own address space and the kernel | |
48 | * sets up their page tables accordingly when they submit their | |
49 | * command buffers and a VMID is assigned. | |
50 | * Cayman/Trinity support up to 8 active VMs at any given time; | |
51 | * SI supports 16. | |
52 | */ | |
53 | ||
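The helpers just below (amdgpu_vm_num_pdes(), amdgpu_vm_directory_size()) and the PTE walk later in this file all split a GPU virtual address the same way: the page frame number indexes the page directory with its high bits and a page table with its low bits. A minimal userspace sketch of that arithmetic, assuming an illustrative 9-bit amdgpu_vm_block_size, 4 KiB GPU pages and a made-up max_pfn (the real values come from module parameters and the ASIC):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only: the driver takes these from module parameters
 * and the ASIC (amdgpu_vm_block_size, adev->vm_manager.max_pfn). */
#define GPU_PAGE_SHIFT   12           /* 4 KiB GPU pages */
#define VM_BLOCK_SIZE    9            /* 512 PTEs per page table */
#define VM_PTE_COUNT     (1u << VM_BLOCK_SIZE)
#define MAX_PFN          (1ull << 28) /* 1 TiB of VM space, for the example */

int main(void)
{
	uint64_t va  = 0x12345678000ull;          /* some GPU virtual address */
	uint64_t pfn = va >> GPU_PAGE_SHIFT;      /* GPU page frame number */
	uint64_t pde = pfn >> VM_BLOCK_SIZE;      /* index into the page directory */
	uint64_t pte = pfn & (VM_PTE_COUNT - 1);  /* index into that page table */

	/* mirrors amdgpu_vm_num_pdes() / amdgpu_vm_directory_size() */
	uint64_t num_pdes = MAX_PFN >> VM_BLOCK_SIZE;
	uint64_t pd_bytes = num_pdes * 8;         /* 8 bytes per PDE */

	printf("va 0x%llx -> pde %llu, pte %llu\n",
	       (unsigned long long)va, (unsigned long long)pde,
	       (unsigned long long)pte);
	printf("page directory: %llu entries, %llu KiB\n",
	       (unsigned long long)num_pdes,
	       (unsigned long long)(pd_bytes >> 10));
	return 0;
}
```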
f4833c4f HK |
54 | /* Local structure. Encapsulate some VM table update parameters to reduce |
55 | * the number of function parameters | |
56 | */ | |
29efc4f5 | 57 | struct amdgpu_pte_update_params { |
27c5f36f CK |
58 | /* amdgpu device we do this update for */ |
59 | struct amdgpu_device *adev; | |
f4833c4f HK |
60 | /* address where to copy page table entries from */ |
61 | uint64_t src; | |
f4833c4f HK |
62 | /* indirect buffer to fill with commands */ |
63 | struct amdgpu_ib *ib; | |
afef8b8f CK |
64 | /* Function which actually does the update */ |
65 | void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe, | |
66 | uint64_t addr, unsigned count, uint32_t incr, | |
67 | uint32_t flags); | |
4c7e8855 CZ |
68 | /* indicate update pt or its shadow */ |
69 | bool shadow; | |
f4833c4f HK |
70 | }; |
71 | ||
284710fa CK |
72 | /* Helper to disable partial resident texture feature from a fence callback */ |
73 | struct amdgpu_prt_cb { | |
74 | struct amdgpu_device *adev; | |
75 | struct dma_fence_cb cb; | |
76 | }; | |
77 | ||
d38ceaf9 AD |
78 | /** |
79 | * amdgpu_vm_num_pdes - return the number of page directory entries | |
80 | * | |
81 | * @adev: amdgpu_device pointer | |
82 | * | |
8843dbbb | 83 | * Calculate the number of page directory entries. |
d38ceaf9 AD |
84 | */ |
85 | static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev) | |
86 | { | |
87 | return adev->vm_manager.max_pfn >> amdgpu_vm_block_size; | |
88 | } | |
89 | ||
90 | /** | |
91 | * amdgpu_vm_directory_size - returns the size of the page directory in bytes | |
92 | * | |
93 | * @adev: amdgpu_device pointer | |
94 | * | |
8843dbbb | 95 | * Calculate the size of the page directory in bytes. |
d38ceaf9 AD |
96 | */ |
97 | static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) | |
98 | { | |
99 | return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8); | |
100 | } | |
101 | ||
102 | /** | |
56467ebf | 103 | * amdgpu_vm_get_pd_bo - add the VM PD to a validation list |
d38ceaf9 AD |
104 | * |
105 | * @vm: vm providing the BOs | |
3c0eea6c | 106 | * @validated: head of validation list |
56467ebf | 107 | * @entry: entry to add |
d38ceaf9 AD |
108 | * |
109 | * Add the page directory to the list of BOs to | |
56467ebf | 110 | * validate for command submission. |
d38ceaf9 | 111 | */ |
56467ebf CK |
112 | void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, |
113 | struct list_head *validated, | |
114 | struct amdgpu_bo_list_entry *entry) | |
d38ceaf9 | 115 | { |
56467ebf | 116 | entry->robj = vm->page_directory; |
56467ebf CK |
117 | entry->priority = 0; |
118 | entry->tv.bo = &vm->page_directory->tbo; | |
119 | entry->tv.shared = true; | |
2f568dbd | 120 | entry->user_pages = NULL; |
56467ebf CK |
121 | list_add(&entry->tv.head, validated); |
122 | } | |
d38ceaf9 | 123 | |
56467ebf | 124 | /** |
f7da30d9 | 125 | * amdgpu_vm_validate_pt_bos - validate the page table BOs |
56467ebf | 126 | * |
5a712a87 | 127 | * @adev: amdgpu device pointer |
56467ebf | 128 | * @vm: vm providing the BOs |
f7da30d9 CK |
129 | * @validate: callback to do the validation |
130 | * @param: parameter for the validation callback | |
d38ceaf9 | 131 | * |
f7da30d9 | 132 | * Validate the page table BOs on command submission if necessary. |
d38ceaf9 | 133 | */ |
f7da30d9 CK |
134 | int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
135 | int (*validate)(void *p, struct amdgpu_bo *bo), | |
136 | void *param) | |
d38ceaf9 | 137 | { |
5a712a87 | 138 | uint64_t num_evictions; |
ee1782c3 | 139 | unsigned i; |
f7da30d9 | 140 | int r; |
d38ceaf9 | 141 | |
5a712a87 CK |
142 | /* We only need to validate the page tables |
143 | * if they aren't already valid. | |
144 | */ | |
145 | num_evictions = atomic64_read(&adev->num_evictions); | |
146 | if (num_evictions == vm->last_eviction_counter) | |
f7da30d9 | 147 | return 0; |
5a712a87 | 148 | |
d38ceaf9 | 149 | /* add the vm page table to the list */ |
ee1782c3 | 150 | for (i = 0; i <= vm->max_pde_used; ++i) { |
914b4dce | 151 | struct amdgpu_bo *bo = vm->page_tables[i].bo; |
ee1782c3 | 152 | |
914b4dce | 153 | if (!bo) |
d38ceaf9 AD |
154 | continue; |
155 | ||
914b4dce | 156 | r = validate(param, bo); |
f7da30d9 CK |
157 | if (r) |
158 | return r; | |
d38ceaf9 | 159 | } |
eceb8a15 | 160 | |
f7da30d9 | 161 | return 0; |
eceb8a15 CK |
162 | } |
163 | ||
164 | /** | |
165 | * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail | |
166 | * | |
167 | * @adev: amdgpu device instance | |
168 | * @vm: vm providing the BOs | |
169 | * | |
170 | * Move the PT BOs to the tail of the LRU. | |
171 | */ | |
172 | void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, | |
173 | struct amdgpu_vm *vm) | |
174 | { | |
175 | struct ttm_bo_global *glob = adev->mman.bdev.glob; | |
176 | unsigned i; | |
177 | ||
178 | spin_lock(&glob->lru_lock); | |
179 | for (i = 0; i <= vm->max_pde_used; ++i) { | |
914b4dce | 180 | struct amdgpu_bo *bo = vm->page_tables[i].bo; |
eceb8a15 | 181 | |
914b4dce | 182 | if (!bo) |
eceb8a15 CK |
183 | continue; |
184 | ||
914b4dce | 185 | ttm_bo_move_to_lru_tail(&bo->tbo); |
eceb8a15 CK |
186 | } |
187 | spin_unlock(&glob->lru_lock); | |
d38ceaf9 AD |
188 | } |
189 | ||
192b7dcb CZ |
190 | static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev, |
191 | struct amdgpu_vm_id *id) | |
192 | { | |
193 | return id->current_gpu_reset_count != | |
194 | atomic_read(&adev->gpu_reset_counter) ? true : false; | |
195 | } | |
196 | ||
d38ceaf9 AD |
197 | /** |
198 | * amdgpu_vm_grab_id - allocate the next free VMID | |
199 | * | |
d38ceaf9 | 200 | * @vm: vm to allocate id for |
7f8a5290 CK |
201 | * @ring: ring we want to submit job to |
202 | * @sync: sync object where we add dependencies | |
94dd0a4a | 203 | * @fence: fence protecting ID from reuse |
d38ceaf9 | 204 | * |
7f8a5290 | 205 | * Allocate an id for the vm, adding fences to the sync obj as necessary. |
d38ceaf9 | 206 | */ |
7f8a5290 | 207 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, |
f54d1867 | 208 | struct amdgpu_sync *sync, struct dma_fence *fence, |
fd53be30 | 209 | struct amdgpu_job *job) |
d38ceaf9 | 210 | { |
d38ceaf9 | 211 | struct amdgpu_device *adev = ring->adev; |
090b767e | 212 | uint64_t fence_context = adev->fence_context + ring->idx; |
f54d1867 | 213 | struct dma_fence *updates = sync->last_vm_update; |
8d76001e | 214 | struct amdgpu_vm_id *id, *idle; |
f54d1867 | 215 | struct dma_fence **fences; |
1fbb2e92 CK |
216 | unsigned i; |
217 | int r = 0; | |
218 | ||
219 | fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids, | |
220 | GFP_KERNEL); | |
221 | if (!fences) | |
222 | return -ENOMEM; | |
d38ceaf9 | 223 | |
94dd0a4a CK |
224 | mutex_lock(&adev->vm_manager.lock); |
225 | ||
36fd7c5c | 226 | /* Check if we have an idle VMID */ |
1fbb2e92 | 227 | i = 0; |
8d76001e | 228 | list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) { |
1fbb2e92 CK |
229 | fences[i] = amdgpu_sync_peek_fence(&idle->active, ring); |
230 | if (!fences[i]) | |
36fd7c5c | 231 | break; |
1fbb2e92 | 232 | ++i; |
36fd7c5c CK |
233 | } |
234 | ||
1fbb2e92 | 235 | /* If we can't find an idle VMID to use, wait till one becomes available */ |
8d76001e | 236 | if (&idle->list == &adev->vm_manager.ids_lru) { |
1fbb2e92 CK |
237 | u64 fence_context = adev->vm_manager.fence_context + ring->idx; |
238 | unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; | |
f54d1867 | 239 | struct dma_fence_array *array; |
1fbb2e92 CK |
240 | unsigned j; |
241 | ||
242 | for (j = 0; j < i; ++j) | |
f54d1867 | 243 | dma_fence_get(fences[j]); |
1fbb2e92 | 244 | |
f54d1867 | 245 | array = dma_fence_array_create(i, fences, fence_context, |
1fbb2e92 CK |
246 | seqno, true); |
247 | if (!array) { | |
248 | for (j = 0; j < i; ++j) | |
f54d1867 | 249 | dma_fence_put(fences[j]); |
1fbb2e92 CK |
250 | kfree(fences); |
251 | r = -ENOMEM; | |
252 | goto error; | |
253 | } | |
254 | ||
255 | ||
256 | r = amdgpu_sync_fence(ring->adev, sync, &array->base); | |
f54d1867 | 257 | dma_fence_put(&array->base); |
1fbb2e92 CK |
258 | if (r) |
259 | goto error; | |
260 | ||
261 | mutex_unlock(&adev->vm_manager.lock); | |
262 | return 0; | |
263 | ||
264 | } | |
265 | kfree(fences); | |
266 | ||
fd53be30 | 267 | job->vm_needs_flush = true; |
1fbb2e92 CK |
268 | /* Check if we can use a VMID already assigned to this VM */ |
269 | i = ring->idx; | |
270 | do { | |
f54d1867 | 271 | struct dma_fence *flushed; |
1fbb2e92 CK |
272 | |
273 | id = vm->ids[i++]; | |
274 | if (i == AMDGPU_MAX_RINGS) | |
275 | i = 0; | |
8d76001e | 276 | |
1fbb2e92 CK |
277 | /* Check all the prerequisites to using this VMID */ |
278 | if (!id) | |
279 | continue; | |
192b7dcb | 280 | if (amdgpu_vm_is_gpu_reset(adev, id)) |
6adb0513 | 281 | continue; |
1fbb2e92 CK |
282 | |
283 | if (atomic64_read(&id->owner) != vm->client_id) | |
284 | continue; | |
285 | ||
fd53be30 | 286 | if (job->vm_pd_addr != id->pd_gpu_addr) |
1fbb2e92 CK |
287 | continue; |
288 | ||
090b767e CK |
289 | if (!id->last_flush) |
290 | continue; | |
291 | ||
292 | if (id->last_flush->context != fence_context && | |
f54d1867 | 293 | !dma_fence_is_signaled(id->last_flush)) |
1fbb2e92 CK |
294 | continue; |
295 | ||
296 | flushed = id->flushed_updates; | |
297 | if (updates && | |
f54d1867 | 298 | (!flushed || dma_fence_is_later(updates, flushed))) |
1fbb2e92 CK |
299 | continue; |
300 | ||
3dab83be CK |
301 | /* Good we can use this VMID. Remember this submission as |
302 | * user of the VMID. | |
303 | */ | |
1fbb2e92 CK |
304 | r = amdgpu_sync_fence(ring->adev, &id->active, fence); |
305 | if (r) | |
306 | goto error; | |
8d76001e | 307 | |
6adb0513 | 308 | id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); |
1fbb2e92 CK |
309 | list_move_tail(&id->list, &adev->vm_manager.ids_lru); |
310 | vm->ids[ring->idx] = id; | |
8d76001e | 311 | |
fd53be30 CZ |
312 | job->vm_id = id - adev->vm_manager.ids; |
313 | job->vm_needs_flush = false; | |
0c0fdf14 | 314 | trace_amdgpu_vm_grab_id(vm, ring->idx, job); |
8d76001e | 315 | |
1fbb2e92 CK |
316 | mutex_unlock(&adev->vm_manager.lock); |
317 | return 0; | |
8d76001e | 318 | |
1fbb2e92 | 319 | } while (i != ring->idx); |
8d76001e | 320 | |
1fbb2e92 CK |
321 | /* Still no ID to use? Then use the idle one found earlier */ |
322 | id = idle; | |
8e9fbeb5 | 323 | |
1fbb2e92 CK |
324 | /* Remember this submission as user of the VMID */ |
325 | r = amdgpu_sync_fence(ring->adev, &id->active, fence); | |
832a902f CK |
326 | if (r) |
327 | goto error; | |
94dd0a4a | 328 | |
f54d1867 CW |
329 | dma_fence_put(id->first); |
330 | id->first = dma_fence_get(fence); | |
94dd0a4a | 331 | |
f54d1867 | 332 | dma_fence_put(id->last_flush); |
41d9eb2c CK |
333 | id->last_flush = NULL; |
334 | ||
f54d1867 CW |
335 | dma_fence_put(id->flushed_updates); |
336 | id->flushed_updates = dma_fence_get(updates); | |
94dd0a4a | 337 | |
fd53be30 | 338 | id->pd_gpu_addr = job->vm_pd_addr; |
b46b8a87 | 339 | id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); |
832a902f | 340 | list_move_tail(&id->list, &adev->vm_manager.ids_lru); |
0ea54b9b | 341 | atomic64_set(&id->owner, vm->client_id); |
832a902f | 342 | vm->ids[ring->idx] = id; |
d38ceaf9 | 343 | |
fd53be30 | 344 | job->vm_id = id - adev->vm_manager.ids; |
0c0fdf14 | 345 | trace_amdgpu_vm_grab_id(vm, ring->idx, job); |
832a902f CK |
346 | |
347 | error: | |
94dd0a4a | 348 | mutex_unlock(&adev->vm_manager.lock); |
a9a78b32 | 349 | return r; |
d38ceaf9 AD |
350 | } |
351 | ||
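The VMID-reuse path in amdgpu_vm_grab_id() above boils down to a handful of comparisons before a flush can be skipped. A condensed, purely illustrative restatement of those checks; the struct and field names here are hypothetical, and the dma_fence state is collapsed into booleans and sequence numbers:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, flattened view of the bookkeeping amdgpu_vm_grab_id() checks. */
struct vmid_state {
	uint64_t owner;            /* client that used this VMID last */
	uint64_t pd_gpu_addr;      /* page directory it was flushed with */
	uint64_t reset_count;      /* GPU reset counter when it was assigned */
	bool     last_flush_done;  /* last VM flush usable (same context or signaled) */
	uint64_t flushed_updates;  /* seqno of the newest flushed VM update */
};

/* Returns true if the VMID can be reused without another VM flush. */
static bool can_reuse_vmid(const struct vmid_state *id,
			   uint64_t client_id, uint64_t pd_addr,
			   uint64_t cur_reset_count, uint64_t newest_vm_update)
{
	if (id->reset_count != cur_reset_count)     /* a GPU reset happened */
		return false;
	if (id->owner != client_id)                 /* VMID moved to another VM */
		return false;
	if (id->pd_gpu_addr != pd_addr)             /* page directory changed */
		return false;
	if (!id->last_flush_done)                   /* previous flush not usable yet */
		return false;
	if (newest_vm_update > id->flushed_updates) /* PTs changed since last flush */
		return false;
	return true;
}

int main(void)
{
	struct vmid_state id = { .owner = 1, .pd_gpu_addr = 0x100000,
				 .reset_count = 0, .last_flush_done = true,
				 .flushed_updates = 42 };
	/* same client, same PD, no reset, nothing new to flush -> reuse */
	return can_reuse_vmid(&id, 1, 0x100000, 0, 42) ? 0 : 1;
}
```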
93dcc37d AD |
352 | static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) |
353 | { | |
354 | struct amdgpu_device *adev = ring->adev; | |
a1255107 | 355 | const struct amdgpu_ip_block *ip_block; |
93dcc37d | 356 | |
21cd942e | 357 | if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) |
93dcc37d AD |
358 | /* only compute rings */ |
359 | return false; | |
360 | ||
361 | ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); | |
362 | if (!ip_block) | |
363 | return false; | |
364 | ||
a1255107 | 365 | if (ip_block->version->major <= 7) { |
93dcc37d AD |
366 | /* gfx7 has no workaround */ |
367 | return true; | |
a1255107 | 368 | } else if (ip_block->version->major == 8) { |
93dcc37d AD |
369 | if (adev->gfx.mec_fw_version >= 673) |
370 | /* gfx8 is fixed in MEC firmware 673 */ | |
371 | return false; | |
372 | else | |
373 | return true; | |
374 | } | |
375 | return false; | |
376 | } | |
377 | ||
d38ceaf9 AD |
378 | /** |
379 | * amdgpu_vm_flush - hardware flush the vm | |
380 | * | |
381 | * @ring: ring to use for flush | |
cffadc83 | 382 | * @vm_id: vmid number to use |
4ff37a83 | 383 | * @pd_addr: address of the page directory |
d38ceaf9 | 384 | * |
4ff37a83 | 385 | * Emit a VM flush when it is necessary. |
d38ceaf9 | 386 | */ |
fd53be30 | 387 | int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) |
d38ceaf9 | 388 | { |
971fe9a9 | 389 | struct amdgpu_device *adev = ring->adev; |
fd53be30 | 390 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id]; |
d564a06e | 391 | bool gds_switch_needed = ring->funcs->emit_gds_switch && ( |
fd53be30 CZ |
392 | id->gds_base != job->gds_base || |
393 | id->gds_size != job->gds_size || | |
394 | id->gws_base != job->gws_base || | |
395 | id->gws_size != job->gws_size || | |
396 | id->oa_base != job->oa_base || | |
397 | id->oa_size != job->oa_size); | |
41d9eb2c | 398 | int r; |
d564a06e CK |
399 | |
400 | if (ring->funcs->emit_pipeline_sync && ( | |
fd53be30 | 401 | job->vm_needs_flush || gds_switch_needed || |
93dcc37d | 402 | amdgpu_vm_ring_has_compute_vm_bug(ring))) |
d564a06e | 403 | amdgpu_ring_emit_pipeline_sync(ring); |
971fe9a9 | 404 | |
aa1c8900 CZ |
405 | if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || |
406 | amdgpu_vm_is_gpu_reset(adev, id))) { | |
f54d1867 | 407 | struct dma_fence *fence; |
41d9eb2c | 408 | |
fd53be30 CZ |
409 | trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id); |
410 | amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); | |
41d9eb2c | 411 | |
3dab83be CK |
412 | r = amdgpu_fence_emit(ring, &fence); |
413 | if (r) | |
414 | return r; | |
415 | ||
41d9eb2c | 416 | mutex_lock(&adev->vm_manager.lock); |
f54d1867 | 417 | dma_fence_put(id->last_flush); |
3dab83be | 418 | id->last_flush = fence; |
41d9eb2c | 419 | mutex_unlock(&adev->vm_manager.lock); |
d38ceaf9 | 420 | } |
cffadc83 | 421 | |
d564a06e | 422 | if (gds_switch_needed) { |
fd53be30 CZ |
423 | id->gds_base = job->gds_base; |
424 | id->gds_size = job->gds_size; | |
425 | id->gws_base = job->gws_base; | |
426 | id->gws_size = job->gws_size; | |
427 | id->oa_base = job->oa_base; | |
428 | id->oa_size = job->oa_size; | |
429 | amdgpu_ring_emit_gds_switch(ring, job->vm_id, | |
430 | job->gds_base, job->gds_size, | |
431 | job->gws_base, job->gws_size, | |
432 | job->oa_base, job->oa_size); | |
971fe9a9 | 433 | } |
41d9eb2c CK |
434 | |
435 | return 0; | |
971fe9a9 CK |
436 | } |
437 | ||
438 | /** | |
439 | * amdgpu_vm_reset_id - reset VMID to zero | |
440 | * | |
441 | * @adev: amdgpu device structure | |
442 | * @vm_id: vmid number to use | |
443 | * | |
444 | * Reset saved GDS, GWS and OA to force switch on next flush. | |
445 | */ | |
446 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id) | |
447 | { | |
bcb1ba35 CK |
448 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; |
449 | ||
450 | id->gds_base = 0; | |
451 | id->gds_size = 0; | |
452 | id->gws_base = 0; | |
453 | id->gws_size = 0; | |
454 | id->oa_base = 0; | |
455 | id->oa_size = 0; | |
d38ceaf9 AD |
456 | } |
457 | ||
d38ceaf9 AD |
458 | /** |
459 | * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo | |
460 | * | |
461 | * @vm: requested vm | |
462 | * @bo: requested buffer object | |
463 | * | |
8843dbbb | 464 | * Find @bo inside the requested vm. |
d38ceaf9 AD |
465 | * Search inside the @bo's vm list for the requested vm | |
466 | * Returns the found bo_va or NULL if none is found | |
467 | * | |
468 | * Object has to be reserved! | |
469 | */ | |
470 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | |
471 | struct amdgpu_bo *bo) | |
472 | { | |
473 | struct amdgpu_bo_va *bo_va; | |
474 | ||
475 | list_for_each_entry(bo_va, &bo->va, bo_list) { | |
476 | if (bo_va->vm == vm) { | |
477 | return bo_va; | |
478 | } | |
479 | } | |
480 | return NULL; | |
481 | } | |
482 | ||
483 | /** | |
afef8b8f | 484 | * amdgpu_vm_do_set_ptes - helper to call the right asic function |
d38ceaf9 | 485 | * |
29efc4f5 | 486 | * @params: see amdgpu_pte_update_params definition |
d38ceaf9 AD |
487 | * @pe: addr of the page entry |
488 | * @addr: dst addr to write into pe | |
489 | * @count: number of page entries to update | |
490 | * @incr: increase next addr by incr bytes | |
491 | * @flags: hw access flags | |
d38ceaf9 AD |
492 | * |
493 | * Traces the parameters and calls the right asic functions | |
494 | * to setup the page table using the DMA. | |
495 | */ | |
afef8b8f CK |
496 | static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params, |
497 | uint64_t pe, uint64_t addr, | |
498 | unsigned count, uint32_t incr, | |
499 | uint32_t flags) | |
d38ceaf9 | 500 | { |
ec2f05f0 | 501 | trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); |
d38ceaf9 | 502 | |
afef8b8f | 503 | if (count < 3) { |
de9ea7bd CK |
504 | amdgpu_vm_write_pte(params->adev, params->ib, pe, |
505 | addr | flags, count, incr); | |
d38ceaf9 AD |
506 | |
507 | } else { | |
27c5f36f | 508 | amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr, |
d38ceaf9 AD |
509 | count, incr, flags); |
510 | } | |
511 | } | |
512 | ||
afef8b8f CK |
513 | /** |
514 | * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART | |
515 | * | |
516 | * @params: see amdgpu_pte_update_params definition | |
517 | * @pe: addr of the page entry | |
518 | * @addr: dst addr to write into pe | |
519 | * @count: number of page entries to update | |
520 | * @incr: increase next addr by incr bytes | |
521 | * @flags: hw access flags | |
522 | * | |
523 | * Traces the parameters and calls the DMA function to copy the PTEs. | |
524 | */ | |
525 | static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params, | |
526 | uint64_t pe, uint64_t addr, | |
527 | unsigned count, uint32_t incr, | |
528 | uint32_t flags) | |
529 | { | |
ec2f05f0 | 530 | uint64_t src = (params->src + (addr >> 12) * 8); |
afef8b8f | 531 | |
ec2f05f0 CK |
532 | |
533 | trace_amdgpu_vm_copy_ptes(pe, src, count); | |
534 | ||
535 | amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count); | |
afef8b8f CK |
536 | } |
537 | ||
d38ceaf9 | 538 | /** |
b07c9d2a | 539 | * amdgpu_vm_map_gart - Resolve gart mapping of addr |
d38ceaf9 | 540 | * |
b07c9d2a | 541 | * @pages_addr: optional DMA address to use for lookup |
d38ceaf9 AD |
542 | * @addr: the unmapped addr |
543 | * | |
544 | * Look up the physical address of the page that the pte resolves | |
b07c9d2a | 545 | * to and return the pointer for the page table entry. |
d38ceaf9 | 546 | */ |
de9ea7bd | 547 | static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) |
d38ceaf9 AD |
548 | { |
549 | uint64_t result; | |
550 | ||
de9ea7bd CK |
551 | /* page table offset */ |
552 | result = pages_addr[addr >> PAGE_SHIFT]; | |
b07c9d2a | 553 | |
de9ea7bd CK |
554 | /* in case cpu page size != gpu page size*/ |
555 | result |= addr & (~PAGE_MASK); | |
d38ceaf9 | 556 | |
b07c9d2a | 557 | result &= 0xFFFFFFFFFFFFF000ULL; |
d38ceaf9 AD |
558 | |
559 | return result; | |
560 | } | |
561 | ||
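A standalone rerun of the amdgpu_vm_map_gart() arithmetic above. The 64 KiB CPU page size is an assumption chosen so the offset handling is actually visible; with 4 KiB CPU pages the OR is cancelled by the final 4 KiB mask:

```c
#include <stdint.h>
#include <stdio.h>

#define CPU_PAGE_SHIFT 16                        /* 64 KiB CPU pages (assumption) */
#define CPU_PAGE_SIZE  (1ull << CPU_PAGE_SHIFT)
#define CPU_PAGE_MASK  (~(CPU_PAGE_SIZE - 1))

/* Same steps as amdgpu_vm_map_gart(): index the DMA-address array by CPU
 * page, re-apply the in-page offset, keep the result GPU-page aligned. */
static uint64_t map_gart(const uint64_t *pages_addr, uint64_t addr)
{
	uint64_t result = pages_addr[addr >> CPU_PAGE_SHIFT]; /* CPU page base (DMA) */

	result |= addr & ~CPU_PAGE_MASK;       /* offset inside the CPU page */
	result &= 0xFFFFFFFFFFFFF000ULL;       /* keep it 4 KiB (GPU page) aligned */
	return result;
}

int main(void)
{
	/* two CPU pages worth of fake DMA addresses */
	uint64_t pages_addr[2] = { 0x80000000ull, 0xa0000000ull };
	/* GPU page at offset 0x3000 inside CPU page 1 */
	uint64_t addr = (1ull << CPU_PAGE_SHIFT) + 0x3000;

	printf("resolved to 0x%llx\n",
	       (unsigned long long)map_gart(pages_addr, addr));
	/* -> 0xa0003000: DMA base of CPU page 1 plus the 4 KiB-aligned offset */
	return 0;
}
```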
f8991bab CK |
562 | /* |
563 | * amdgpu_vm_update_page_directory - make sure that page directory is valid | |
564 | * | |
565 | * @adev: amdgpu_device pointer | |
566 | * @vm: requested vm | |
569 | * | |
570 | * Allocates new page tables if necessary | |
571 | * and updates the page directory. | |
572 | * Returns 0 for success, error for failure. | |
573 | */ | |
574 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | |
575 | struct amdgpu_vm *vm) | |
d38ceaf9 | 576 | { |
f8991bab | 577 | struct amdgpu_bo *shadow; |
2d55e45a | 578 | struct amdgpu_ring *ring; |
f8991bab | 579 | uint64_t pd_addr, shadow_addr; |
d38ceaf9 | 580 | uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; |
f8991bab | 581 | uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0; |
d38ceaf9 | 582 | unsigned count = 0, pt_idx, ndw; |
d71518b5 | 583 | struct amdgpu_job *job; |
29efc4f5 | 584 | struct amdgpu_pte_update_params params; |
f54d1867 | 585 | struct dma_fence *fence = NULL; |
d5fc5e82 | 586 | |
d38ceaf9 AD |
587 | int r; |
588 | ||
2d55e45a | 589 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); |
f8991bab | 590 | shadow = vm->page_directory->shadow; |
2d55e45a | 591 | |
d38ceaf9 AD |
592 | /* padding, etc. */ |
593 | ndw = 64; | |
594 | ||
595 | /* assume the worst case */ | |
596 | ndw += vm->max_pde_used * 6; | |
597 | ||
f8991bab CK |
598 | pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); |
599 | if (shadow) { | |
600 | r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); | |
601 | if (r) | |
602 | return r; | |
603 | shadow_addr = amdgpu_bo_gpu_offset(shadow); | |
604 | ndw *= 2; | |
605 | } else { | |
606 | shadow_addr = 0; | |
607 | } | |
608 | ||
d71518b5 CK |
609 | r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); |
610 | if (r) | |
d38ceaf9 | 611 | return r; |
d71518b5 | 612 | |
27c5f36f CK |
613 | memset(¶ms, 0, sizeof(params)); |
614 | params.adev = adev; | |
29efc4f5 | 615 | params.ib = &job->ibs[0]; |
d38ceaf9 AD |
616 | |
617 | /* walk over the address space and update the page directory */ | |
618 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { | |
914b4dce | 619 | struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo; |
d38ceaf9 AD |
620 | uint64_t pde, pt; |
621 | ||
622 | if (bo == NULL) | |
623 | continue; | |
624 | ||
0fc8683e | 625 | if (bo->shadow) { |
f8991bab | 626 | struct amdgpu_bo *pt_shadow = bo->shadow; |
0fc8683e | 627 | |
f8991bab CK |
628 | r = amdgpu_ttm_bind(&pt_shadow->tbo, |
629 | &pt_shadow->tbo.mem); | |
0fc8683e CK |
630 | if (r) |
631 | return r; | |
632 | } | |
633 | ||
d38ceaf9 | 634 | pt = amdgpu_bo_gpu_offset(bo); |
f8991bab CK |
635 | if (vm->page_tables[pt_idx].addr == pt) |
636 | continue; | |
637 | ||
638 | vm->page_tables[pt_idx].addr = pt; | |
d38ceaf9 AD |
639 | |
640 | pde = pd_addr + pt_idx * 8; | |
641 | if (((last_pde + 8 * count) != pde) || | |
96105e53 CK |
642 | ((last_pt + incr * count) != pt) || |
643 | (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { | |
d38ceaf9 AD |
644 | |
645 | if (count) { | |
f8991bab CK |
646 | if (shadow) |
647 | amdgpu_vm_do_set_ptes(¶ms, | |
648 | last_shadow, | |
649 | last_pt, count, | |
650 | incr, | |
651 | AMDGPU_PTE_VALID); | |
652 | ||
afef8b8f CK |
653 | amdgpu_vm_do_set_ptes(¶ms, last_pde, |
654 | last_pt, count, incr, | |
655 | AMDGPU_PTE_VALID); | |
d38ceaf9 AD |
656 | } |
657 | ||
658 | count = 1; | |
659 | last_pde = pde; | |
f8991bab | 660 | last_shadow = shadow_addr + pt_idx * 8; |
d38ceaf9 AD |
661 | last_pt = pt; |
662 | } else { | |
663 | ++count; | |
664 | } | |
665 | } | |
666 | ||
f8991bab CK |
667 | if (count) { |
668 | if (vm->page_directory->shadow) | |
669 | amdgpu_vm_do_set_ptes(¶ms, last_shadow, last_pt, | |
670 | count, incr, AMDGPU_PTE_VALID); | |
671 | ||
afef8b8f CK |
672 | amdgpu_vm_do_set_ptes(¶ms, last_pde, last_pt, |
673 | count, incr, AMDGPU_PTE_VALID); | |
f8991bab | 674 | } |
d38ceaf9 | 675 | |
f8991bab CK |
676 | if (params.ib->length_dw == 0) { |
677 | amdgpu_job_free(job); | |
678 | return 0; | |
679 | } | |
680 | ||
681 | amdgpu_ring_pad_ib(ring, params.ib); | |
682 | amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, | |
683 | AMDGPU_FENCE_OWNER_VM); | |
684 | if (shadow) | |
685 | amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv, | |
e86f9cee | 686 | AMDGPU_FENCE_OWNER_VM); |
05906dec | 687 | |
f8991bab CK |
688 | WARN_ON(params.ib->length_dw > ndw); |
689 | r = amdgpu_job_submit(job, ring, &vm->entity, | |
690 | AMDGPU_FENCE_OWNER_VM, &fence); | |
691 | if (r) | |
692 | goto error_free; | |
d5fc5e82 | 693 | |
f8991bab | 694 | amdgpu_bo_fence(vm->page_directory, fence, true); |
220196b3 DA |
695 | dma_fence_put(vm->page_directory_fence); |
696 | vm->page_directory_fence = dma_fence_get(fence); | |
697 | dma_fence_put(fence); | |
d38ceaf9 AD |
698 | |
699 | return 0; | |
d5fc5e82 CZ |
700 | |
701 | error_free: | |
d71518b5 | 702 | amdgpu_job_free(job); |
4af9f07c | 703 | return r; |
d38ceaf9 AD |
704 | } |
705 | ||
d38ceaf9 AD |
706 | /** |
707 | * amdgpu_vm_update_ptes - make sure that page tables are valid | |
708 | * | |
29efc4f5 | 709 | * @params: see amdgpu_pte_update_params definition |
d38ceaf9 AD |
710 | * @vm: requested vm |
711 | * @start: start of GPU address range | |
712 | * @end: end of GPU address range | |
677131a1 | 713 | * @dst: destination address to map to, the next dst inside the function |
d38ceaf9 AD |
714 | * @flags: mapping flags |
715 | * | |
8843dbbb | 716 | * Update the page tables in the range @start - @end. |
d38ceaf9 | 717 | */ |
27c5f36f | 718 | static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, |
a1e08d3b | 719 | struct amdgpu_vm *vm, |
a1e08d3b CK |
720 | uint64_t start, uint64_t end, |
721 | uint64_t dst, uint32_t flags) | |
d38ceaf9 | 722 | { |
31f6c1fe CK |
723 | const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; |
724 | ||
92696dd5 | 725 | uint64_t cur_pe_start, cur_nptes, cur_dst; |
677131a1 | 726 | uint64_t addr; /* next GPU address to be updated */ |
21718497 AX |
727 | uint64_t pt_idx; |
728 | struct amdgpu_bo *pt; | |
729 | unsigned nptes; /* next number of ptes to be updated */ | |
730 | uint64_t next_pe_start; | |
731 | ||
732 | /* initialize the variables */ | |
733 | addr = start; | |
734 | pt_idx = addr >> amdgpu_vm_block_size; | |
914b4dce | 735 | pt = vm->page_tables[pt_idx].bo; |
4c7e8855 CZ |
736 | if (params->shadow) { |
737 | if (!pt->shadow) | |
738 | return; | |
914b4dce | 739 | pt = pt->shadow; |
4c7e8855 | 740 | } |
21718497 AX |
741 | if ((addr & ~mask) == (end & ~mask)) |
742 | nptes = end - addr; | |
743 | else | |
744 | nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); | |
745 | ||
746 | cur_pe_start = amdgpu_bo_gpu_offset(pt); | |
747 | cur_pe_start += (addr & mask) * 8; | |
92696dd5 | 748 | cur_nptes = nptes; |
21718497 AX |
749 | cur_dst = dst; |
750 | ||
751 | /* for next ptb*/ | |
752 | addr += nptes; | |
753 | dst += nptes * AMDGPU_GPU_PAGE_SIZE; | |
d38ceaf9 AD |
754 | |
755 | /* walk over the address space and update the page tables */ | |
21718497 AX |
756 | while (addr < end) { |
757 | pt_idx = addr >> amdgpu_vm_block_size; | |
914b4dce | 758 | pt = vm->page_tables[pt_idx].bo; |
4c7e8855 CZ |
759 | if (params->shadow) { |
760 | if (!pt->shadow) | |
761 | return; | |
914b4dce | 762 | pt = pt->shadow; |
4c7e8855 | 763 | } |
d38ceaf9 AD |
764 | |
765 | if ((addr & ~mask) == (end & ~mask)) | |
766 | nptes = end - addr; | |
767 | else | |
768 | nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); | |
769 | ||
677131a1 AX |
770 | next_pe_start = amdgpu_bo_gpu_offset(pt); |
771 | next_pe_start += (addr & mask) * 8; | |
d38ceaf9 | 772 | |
96105e53 CK |
773 | if ((cur_pe_start + 8 * cur_nptes) == next_pe_start && |
774 | ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) { | |
3a6f8e0c | 775 | /* The next ptb is consecutive to current ptb. |
afef8b8f | 776 | * Don't call the update function now. |
3a6f8e0c AX |
777 | * Will update two ptbs together in future. |
778 | */ | |
92696dd5 | 779 | cur_nptes += nptes; |
3a6f8e0c | 780 | } else { |
afef8b8f CK |
781 | params->func(params, cur_pe_start, cur_dst, cur_nptes, |
782 | AMDGPU_GPU_PAGE_SIZE, flags); | |
d38ceaf9 | 783 | |
677131a1 | 784 | cur_pe_start = next_pe_start; |
92696dd5 | 785 | cur_nptes = nptes; |
677131a1 | 786 | cur_dst = dst; |
d38ceaf9 AD |
787 | } |
788 | ||
21718497 | 789 | /* for next ptb*/ |
d38ceaf9 AD |
790 | addr += nptes; |
791 | dst += nptes * AMDGPU_GPU_PAGE_SIZE; | |
792 | } | |
793 | ||
afef8b8f CK |
794 | params->func(params, cur_pe_start, cur_dst, cur_nptes, |
795 | AMDGPU_GPU_PAGE_SIZE, flags); | |
92696dd5 CK |
796 | } |
797 | ||
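amdgpu_vm_update_ptes() above spends most of its effort coalescing updates: per page table it computes how many PTEs still fall into that table, and back-to-back destination PTE addresses are merged into a single command. A compact userspace rerun of that merge, with made-up page-table GPU addresses and a stand-in value for AMDGPU_VM_MAX_UPDATE_SIZE:

```c
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE       9
#define PTE_COUNT        (1u << BLOCK_SIZE)
#define MAX_UPDATE_PTES  (64 * 1024)   /* stand-in for AMDGPU_VM_MAX_UPDATE_SIZE */

int main(void)
{
	/* fake GPU addresses of three page-table BOs; tables 0 and 1 happen
	 * to be contiguous in VRAM, table 2 lives somewhere else */
	uint64_t pt_base[3] = { 0x100000, 0x101000, 0x240000 };
	uint64_t start = 300, end = 1200;   /* PTE indices 300..1199 */
	uint64_t addr = start;
	uint64_t cur_pe = 0, cur_nptes = 0;

	while (addr < end) {
		uint64_t pt_idx = addr >> BLOCK_SIZE;
		uint64_t in_pt  = addr & (PTE_COUNT - 1);
		uint64_t nptes  = ((addr & ~(uint64_t)(PTE_COUNT - 1)) ==
				   (end  & ~(uint64_t)(PTE_COUNT - 1))) ?
				  end - addr : PTE_COUNT - in_pt;
		uint64_t pe = pt_base[pt_idx] + in_pt * 8;

		if (cur_nptes && pe == cur_pe + cur_nptes * 8 &&
		    cur_nptes + nptes <= MAX_UPDATE_PTES) {
			cur_nptes += nptes;   /* extend the pending command */
		} else {
			if (cur_nptes)
				printf("update %llu PTEs at 0x%llx\n",
				       (unsigned long long)cur_nptes,
				       (unsigned long long)cur_pe);
			cur_pe = pe;
			cur_nptes = nptes;
		}
		addr += nptes;
	}
	printf("update %llu PTEs at 0x%llx\n",
	       (unsigned long long)cur_nptes, (unsigned long long)cur_pe);
	return 0;
}
```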
798 | /* | |
799 | * amdgpu_vm_frag_ptes - add fragment information to PTEs | |
800 | * | |
801 | * @params: see amdgpu_pte_update_params definition | |
802 | * @vm: requested vm | |
803 | * @start: first PTE to handle | |
804 | * @end: last PTE to handle | |
805 | * @dst: addr those PTEs should point to | |
806 | * @flags: hw mapping flags | |
807 | */ | |
808 | static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, | |
809 | struct amdgpu_vm *vm, | |
810 | uint64_t start, uint64_t end, | |
811 | uint64_t dst, uint32_t flags) | |
812 | { | |
813 | /** | |
814 | * The MC L1 TLB supports variable sized pages, based on a fragment | |
815 | * field in the PTE. When this field is set to a non-zero value, page | |
816 | * granularity is increased from 4KB to (1 << (12 + frag)). The PTE | |
817 | * flags are considered valid for all PTEs within the fragment range | |
818 | * and corresponding mappings are assumed to be physically contiguous. | |
819 | * | |
820 | * The L1 TLB can store a single PTE for the whole fragment, | |
821 | * significantly increasing the space available for translation | |
822 | * caching. This leads to large improvements in throughput when the | |
823 | * TLB is under pressure. | |
824 | * | |
825 | * The L2 TLB distributes small and large fragments into two | |
826 | * asymmetric partitions. The large fragment cache is significantly | |
827 | * larger. Thus, we try to use large fragments wherever possible. | |
828 | * Userspace can support this by aligning virtual base address and | |
829 | * allocation size to the fragment size. | |
830 | */ | |
831 | ||
8036617e CK |
832 | /* SI and newer are optimized for 64KB */ |
833 | uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG); | |
834 | uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG; | |
92696dd5 CK |
835 | |
836 | uint64_t frag_start = ALIGN(start, frag_align); | |
837 | uint64_t frag_end = end & ~(frag_align - 1); | |
838 | ||
839 | /* system pages are not contiguous */ | |
b7fc2cbd | 840 | if (params->src || !(flags & AMDGPU_PTE_VALID) || |
92696dd5 CK |
841 | (frag_start >= frag_end)) { |
842 | ||
843 | amdgpu_vm_update_ptes(params, vm, start, end, dst, flags); | |
844 | return; | |
845 | } | |
846 | ||
847 | /* handle the 4K area at the beginning */ | |
848 | if (start != frag_start) { | |
849 | amdgpu_vm_update_ptes(params, vm, start, frag_start, | |
850 | dst, flags); | |
851 | dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE; | |
852 | } | |
853 | ||
854 | /* handle the area in the middle */ | |
855 | amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst, | |
8036617e | 856 | flags | frag_flags); |
92696dd5 CK |
857 | |
858 | /* handle the 4K area at the end */ | |
859 | if (frag_end != end) { | |
860 | dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE; | |
861 | amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags); | |
862 | } | |
d38ceaf9 AD |
863 | } |
864 | ||
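Following the "SI and newer are optimized for 64KB" note above, amdgpu_vm_frag_ptes() works with 2^4 = 16 pages per fragment and splits a range into an unaligned head, a fragment-flagged middle and an unaligned tail. A small sketch of that split for a sample range (page indices only, flags omitted):

```c
#include <stdint.h>
#include <stdio.h>

#define LOG2_PAGES_PER_FRAG 4
#define FRAG_ALIGN (1u << LOG2_PAGES_PER_FRAG)       /* 16 pages == 64 KiB */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~(uint64_t)((a) - 1))

int main(void)
{
	uint64_t start = 5, end = 75;   /* PTE indices, end is exclusive */
	uint64_t frag_start = ALIGN_UP(start, FRAG_ALIGN);
	uint64_t frag_end   = end & ~(uint64_t)(FRAG_ALIGN - 1);

	if (frag_start >= frag_end) {
		printf("range too small, map everything with plain 4 KiB PTEs\n");
		return 0;
	}
	if (start != frag_start)
		printf("head:   pages %llu..%llu as plain 4 KiB PTEs\n",
		       (unsigned long long)start,
		       (unsigned long long)(frag_start - 1));
	printf("middle: pages %llu..%llu with the FRAG(4) (64 KiB) flag\n",
	       (unsigned long long)frag_start, (unsigned long long)(frag_end - 1));
	if (frag_end != end)
		printf("tail:   pages %llu..%llu as plain 4 KiB PTEs\n",
		       (unsigned long long)frag_end, (unsigned long long)(end - 1));
	return 0;
}
```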
d38ceaf9 AD |
865 | /** |
866 | * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table | |
867 | * | |
868 | * @adev: amdgpu_device pointer | |
3cabaa54 | 869 | * @exclusive: fence we need to sync to |
fa3ab3c7 CK |
870 | * @src: address where to copy page table entries from |
871 | * @pages_addr: DMA addresses to use for mapping | |
d38ceaf9 | 872 | * @vm: requested vm |
a14faa65 CK |
873 | * @start: start of mapped range |
874 | * @last: last mapped entry | |
875 | * @flags: flags for the entries | |
d38ceaf9 | 876 | * @addr: addr to set the area to |
d38ceaf9 AD |
877 | * @fence: optional resulting fence |
878 | * | |
a14faa65 | 879 | * Fill in the page table entries between @start and @last. |
d38ceaf9 | 880 | * Returns 0 for success, -EINVAL for failure. |
d38ceaf9 AD |
881 | */ |
882 | static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, | |
f54d1867 | 883 | struct dma_fence *exclusive, |
fa3ab3c7 CK |
884 | uint64_t src, |
885 | dma_addr_t *pages_addr, | |
d38ceaf9 | 886 | struct amdgpu_vm *vm, |
a14faa65 CK |
887 | uint64_t start, uint64_t last, |
888 | uint32_t flags, uint64_t addr, | |
f54d1867 | 889 | struct dma_fence **fence) |
d38ceaf9 | 890 | { |
2d55e45a | 891 | struct amdgpu_ring *ring; |
a1e08d3b | 892 | void *owner = AMDGPU_FENCE_OWNER_VM; |
d38ceaf9 | 893 | unsigned nptes, ncmds, ndw; |
d71518b5 | 894 | struct amdgpu_job *job; |
29efc4f5 | 895 | struct amdgpu_pte_update_params params; |
f54d1867 | 896 | struct dma_fence *f = NULL; |
d38ceaf9 AD |
897 | int r; |
898 | ||
afef8b8f CK |
899 | memset(¶ms, 0, sizeof(params)); |
900 | params.adev = adev; | |
901 | params.src = src; | |
902 | ||
2d55e45a | 903 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); |
27c5f36f | 904 | |
29efc4f5 | 905 | memset(¶ms, 0, sizeof(params)); |
27c5f36f | 906 | params.adev = adev; |
29efc4f5 | 907 | params.src = src; |
2d55e45a | 908 | |
a1e08d3b CK |
909 | /* sync to everything on unmapping */ |
910 | if (!(flags & AMDGPU_PTE_VALID)) | |
911 | owner = AMDGPU_FENCE_OWNER_UNDEFINED; | |
912 | ||
a14faa65 | 913 | nptes = last - start + 1; |
d38ceaf9 AD |
914 | |
915 | /* | |
916 | * reserve space for one command every (1 << BLOCK_SIZE) | |
917 | * entries or 2k dwords (whatever is smaller) | |
918 | */ | |
919 | ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1; | |
920 | ||
921 | /* padding, etc. */ | |
922 | ndw = 64; | |
923 | ||
b0456f93 | 924 | if (src) { |
d38ceaf9 AD |
925 | /* only copy commands needed */ |
926 | ndw += ncmds * 7; | |
927 | ||
afef8b8f CK |
928 | params.func = amdgpu_vm_do_copy_ptes; |
929 | ||
b0456f93 CK |
930 | } else if (pages_addr) { |
931 | /* copy commands needed */ | |
932 | ndw += ncmds * 7; | |
d38ceaf9 | 933 | |
b0456f93 | 934 | /* and also PTEs */ |
d38ceaf9 AD |
935 | ndw += nptes * 2; |
936 | ||
afef8b8f CK |
937 | params.func = amdgpu_vm_do_copy_ptes; |
938 | ||
d38ceaf9 AD |
939 | } else { |
940 | /* set page commands needed */ | |
941 | ndw += ncmds * 10; | |
942 | ||
943 | /* two extra commands for begin/end of fragment */ | |
944 | ndw += 2 * 10; | |
afef8b8f CK |
945 | |
946 | params.func = amdgpu_vm_do_set_ptes; | |
d38ceaf9 AD |
947 | } |
948 | ||
d71518b5 CK |
949 | r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); |
950 | if (r) | |
d38ceaf9 | 951 | return r; |
d71518b5 | 952 | |
29efc4f5 | 953 | params.ib = &job->ibs[0]; |
d5fc5e82 | 954 | |
b0456f93 CK |
955 | if (!src && pages_addr) { |
956 | uint64_t *pte; | |
957 | unsigned i; | |
958 | ||
959 | /* Put the PTEs at the end of the IB. */ | |
960 | i = ndw - nptes * 2; | |
961 | pte = (uint64_t *)&(job->ibs->ptr[i]); | |
962 | params.src = job->ibs->gpu_addr + i * 4; | |
963 | ||
964 | for (i = 0; i < nptes; ++i) { | |
965 | pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i * | |
966 | AMDGPU_GPU_PAGE_SIZE); | |
967 | pte[i] |= flags; | |
968 | } | |
d7a4ac66 | 969 | addr = 0; |
b0456f93 CK |
970 | } |
971 | ||
3cabaa54 CK |
972 | r = amdgpu_sync_fence(adev, &job->sync, exclusive); |
973 | if (r) | |
974 | goto error_free; | |
975 | ||
e86f9cee | 976 | r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, |
a1e08d3b CK |
977 | owner); |
978 | if (r) | |
979 | goto error_free; | |
d38ceaf9 | 980 | |
a1e08d3b CK |
981 | r = reservation_object_reserve_shared(vm->page_directory->tbo.resv); |
982 | if (r) | |
983 | goto error_free; | |
984 | ||
4c7e8855 CZ |
985 | params.shadow = true; |
986 | amdgpu_vm_frag_ptes(¶ms, vm, start, last + 1, addr, flags); | |
987 | params.shadow = false; | |
92696dd5 | 988 | amdgpu_vm_frag_ptes(¶ms, vm, start, last + 1, addr, flags); |
d38ceaf9 | 989 | |
29efc4f5 CK |
990 | amdgpu_ring_pad_ib(ring, params.ib); |
991 | WARN_ON(params.ib->length_dw > ndw); | |
2bd9ccfa CK |
992 | r = amdgpu_job_submit(job, ring, &vm->entity, |
993 | AMDGPU_FENCE_OWNER_VM, &f); | |
4af9f07c CZ |
994 | if (r) |
995 | goto error_free; | |
d38ceaf9 | 996 | |
bf60efd3 | 997 | amdgpu_bo_fence(vm->page_directory, f, true); |
284710fa CK |
998 | dma_fence_put(*fence); |
999 | *fence = f; | |
d38ceaf9 | 1000 | return 0; |
d5fc5e82 CZ |
1001 | |
1002 | error_free: | |
d71518b5 | 1003 | amdgpu_job_free(job); |
4af9f07c | 1004 | return r; |
d38ceaf9 AD |
1005 | } |
1006 | ||
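The ndw bookkeeping in amdgpu_vm_bo_update_mapping() sizes the IB for the worst case: one command per (1 << block_size) entries, plus either copy or set_pte_pde overhead. A standalone re-derivation of those estimates for a sample mapping, again with an illustrative block size of 9:

```c
#include <stdint.h>
#include <stdio.h>

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
	unsigned block_size = 9;                 /* illustrative amdgpu_vm_block_size */
	uint64_t nptes = 256;                    /* a 1 MiB mapping in 4 KiB pages */
	unsigned ncmds = (unsigned)(nptes >> min_u(block_size, 11)) + 1;
	unsigned base  = 64;                     /* padding, etc. */

	unsigned ndw_copy_gart = base + ncmds * 7;                        /* src != 0   */
	unsigned ndw_copy_ib   = base + ncmds * 7 + (unsigned)nptes * 2;  /* pages_addr */
	unsigned ndw_set       = base + ncmds * 10 + 2 * 10;              /* set_pte_pde */

	printf("copy from GART table: %u dw (%u bytes)\n",
	       ndw_copy_gart, ndw_copy_gart * 4);
	printf("copy PTEs from IB:    %u dw (%u bytes)\n",
	       ndw_copy_ib, ndw_copy_ib * 4);
	printf("set_pte_pde commands: %u dw (%u bytes)\n",
	       ndw_set, ndw_set * 4);
	return 0;
}
```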
a14faa65 CK |
1007 | /** |
1008 | * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks | |
1009 | * | |
1010 | * @adev: amdgpu_device pointer | |
3cabaa54 | 1011 | * @exclusive: fence we need to sync to |
8358dcee CK |
1012 | * @gtt_flags: flags as they are used for GTT |
1013 | * @pages_addr: DMA addresses to use for mapping | |
a14faa65 CK |
1014 | * @vm: requested vm |
1015 | * @mapping: mapped range and flags to use for the update | |
8358dcee | 1016 | * @flags: HW flags for the mapping |
63e0ba40 | 1017 | * @nodes: array of drm_mm_nodes with the MC addresses |
a14faa65 CK |
1018 | * @fence: optional resulting fence |
1019 | * | |
1020 | * Split the mapping into smaller chunks so that each update fits | |
1021 | * into a SDMA IB. | |
1022 | * Returns 0 for success, -EINVAL for failure. | |
1023 | */ | |
1024 | static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, | |
f54d1867 | 1025 | struct dma_fence *exclusive, |
a14faa65 | 1026 | uint32_t gtt_flags, |
8358dcee | 1027 | dma_addr_t *pages_addr, |
a14faa65 CK |
1028 | struct amdgpu_vm *vm, |
1029 | struct amdgpu_bo_va_mapping *mapping, | |
63e0ba40 CK |
1030 | uint32_t flags, |
1031 | struct drm_mm_node *nodes, | |
f54d1867 | 1032 | struct dma_fence **fence) |
a14faa65 | 1033 | { |
63e0ba40 | 1034 | uint64_t pfn, src = 0, start = mapping->it.start; |
a14faa65 CK |
1035 | int r; |
1036 | ||
1037 | /* normally, bo_va->flags only contains READABLE and WRITEABLE bits, | |
1038 | * but just in case we filter the flags here first | |
1039 | */ | |
1040 | if (!(mapping->flags & AMDGPU_PTE_READABLE)) | |
1041 | flags &= ~AMDGPU_PTE_READABLE; | |
1042 | if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) | |
1043 | flags &= ~AMDGPU_PTE_WRITEABLE; | |
1044 | ||
1045 | trace_amdgpu_vm_bo_update(mapping); | |
1046 | ||
63e0ba40 CK |
1047 | pfn = mapping->offset >> PAGE_SHIFT; |
1048 | if (nodes) { | |
1049 | while (pfn >= nodes->size) { | |
1050 | pfn -= nodes->size; | |
1051 | ++nodes; | |
1052 | } | |
fa3ab3c7 | 1053 | } |
a14faa65 | 1054 | |
63e0ba40 CK |
1055 | do { |
1056 | uint64_t max_entries; | |
1057 | uint64_t addr, last; | |
a14faa65 | 1058 | |
63e0ba40 CK |
1059 | if (nodes) { |
1060 | addr = nodes->start << PAGE_SHIFT; | |
1061 | max_entries = (nodes->size - pfn) * | |
1062 | (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); | |
1063 | } else { | |
1064 | addr = 0; | |
1065 | max_entries = S64_MAX; | |
1066 | } | |
a14faa65 | 1067 | |
63e0ba40 CK |
1068 | if (pages_addr) { |
1069 | if (flags == gtt_flags) | |
1070 | src = adev->gart.table_addr + | |
1071 | (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8; | |
1072 | else | |
1073 | max_entries = min(max_entries, 16ull * 1024ull); | |
1074 | addr = 0; | |
1075 | } else if (flags & AMDGPU_PTE_VALID) { | |
1076 | addr += adev->vm_manager.vram_base_offset; | |
1077 | } | |
1078 | addr += pfn << PAGE_SHIFT; | |
1079 | ||
1080 | last = min((uint64_t)mapping->it.last, start + max_entries - 1); | |
3cabaa54 CK |
1081 | r = amdgpu_vm_bo_update_mapping(adev, exclusive, |
1082 | src, pages_addr, vm, | |
a14faa65 CK |
1083 | start, last, flags, addr, |
1084 | fence); | |
1085 | if (r) | |
1086 | return r; | |
1087 | ||
63e0ba40 CK |
1088 | pfn += last - start + 1; |
1089 | if (nodes && nodes->size == pfn) { | |
1090 | pfn = 0; | |
1091 | ++nodes; | |
1092 | } | |
a14faa65 | 1093 | start = last + 1; |
63e0ba40 CK |
1094 | |
1095 | } while (unlikely(start != mapping->it.last + 1)); | |
a14faa65 CK |
1096 | |
1097 | return 0; | |
1098 | } | |
1099 | ||
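amdgpu_vm_bo_split_mapping() walks the BO's allocation nodes, first skipping the part covered by the mapping offset and then emitting one update per contiguous node chunk. A standalone walk of that chunking with made-up node sizes (the real code uses struct drm_mm_node and additionally handles the GART and system-page cases):

```c
#include <stdint.h>
#include <stdio.h>

struct node { uint64_t start, size; };   /* backing allocation chunks, in pages */

int main(void)
{
	struct node nodes[] = { { 100, 8 }, { 300, 32 }, { 500, 16 } };
	unsigned n = 0, count = sizeof(nodes) / sizeof(nodes[0]);
	uint64_t pfn = 12;                    /* mapping->offset in pages */
	uint64_t va_start = 0x4000;           /* first PTE of the mapping */
	uint64_t va_last  = 0x4000 + 40 - 1;  /* 40 pages mapped */
	uint64_t start = va_start;

	while (n < count && pfn >= nodes[n].size) {   /* skip the offset */
		pfn -= nodes[n].size;
		++n;
	}
	while (start <= va_last && n < count) {
		uint64_t max_entries = nodes[n].size - pfn;
		uint64_t last = start + max_entries - 1;

		if (last > va_last)
			last = va_last;
		printf("PTEs 0x%llx..0x%llx -> backing pages %llu..%llu\n",
		       (unsigned long long)start, (unsigned long long)last,
		       (unsigned long long)(nodes[n].start + pfn),
		       (unsigned long long)(nodes[n].start + pfn + (last - start)));
		pfn += last - start + 1;
		if (pfn == nodes[n].size) {   /* node exhausted, move to the next */
			pfn = 0;
			++n;
		}
		start = last + 1;
	}
	return 0;
}
```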
d38ceaf9 AD |
1100 | /** |
1101 | * amdgpu_vm_bo_update - update all BO mappings in the vm page table | |
1102 | * | |
1103 | * @adev: amdgpu_device pointer | |
1104 | * @bo_va: requested BO and VM object | |
99e124f4 | 1105 | * @clear: if true clear the entries |
d38ceaf9 AD |
1106 | * |
1107 | * Fill in the page table entries for @bo_va. | |
1108 | * Returns 0 for success, -EINVAL for failure. | |
d38ceaf9 AD |
1109 | */ |
1110 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | |
1111 | struct amdgpu_bo_va *bo_va, | |
99e124f4 | 1112 | bool clear) |
d38ceaf9 AD |
1113 | { |
1114 | struct amdgpu_vm *vm = bo_va->vm; | |
1115 | struct amdgpu_bo_va_mapping *mapping; | |
8358dcee | 1116 | dma_addr_t *pages_addr = NULL; |
fa3ab3c7 | 1117 | uint32_t gtt_flags, flags; |
99e124f4 | 1118 | struct ttm_mem_reg *mem; |
63e0ba40 | 1119 | struct drm_mm_node *nodes; |
f54d1867 | 1120 | struct dma_fence *exclusive; |
d38ceaf9 AD |
1121 | int r; |
1122 | ||
a5f6b5b1 | 1123 | if (clear || !bo_va->bo) { |
99e124f4 | 1124 | mem = NULL; |
63e0ba40 | 1125 | nodes = NULL; |
99e124f4 CK |
1126 | exclusive = NULL; |
1127 | } else { | |
8358dcee CK |
1128 | struct ttm_dma_tt *ttm; |
1129 | ||
99e124f4 | 1130 | mem = &bo_va->bo->tbo.mem; |
63e0ba40 CK |
1131 | nodes = mem->mm_node; |
1132 | if (mem->mem_type == TTM_PL_TT) { | |
8358dcee CK |
1133 | ttm = container_of(bo_va->bo->tbo.ttm, struct |
1134 | ttm_dma_tt, ttm); | |
1135 | pages_addr = ttm->dma_address; | |
9ab21462 | 1136 | } |
3cabaa54 | 1137 | exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); |
d38ceaf9 AD |
1138 | } |
1139 | ||
a5f6b5b1 CK |
1140 | if (bo_va->bo) { |
1141 | flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); | |
1142 | gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) && | |
1143 | adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? | |
1144 | flags : 0; | |
1145 | } else { | |
1146 | flags = 0x0; | |
1147 | gtt_flags = ~0x0; | |
1148 | } | |
d38ceaf9 | 1149 | |
7fc11959 CK |
1150 | spin_lock(&vm->status_lock); |
1151 | if (!list_empty(&bo_va->vm_status)) | |
1152 | list_splice_init(&bo_va->valids, &bo_va->invalids); | |
1153 | spin_unlock(&vm->status_lock); | |
1154 | ||
1155 | list_for_each_entry(mapping, &bo_va->invalids, list) { | |
3cabaa54 CK |
1156 | r = amdgpu_vm_bo_split_mapping(adev, exclusive, |
1157 | gtt_flags, pages_addr, vm, | |
63e0ba40 | 1158 | mapping, flags, nodes, |
8358dcee | 1159 | &bo_va->last_pt_update); |
d38ceaf9 AD |
1160 | if (r) |
1161 | return r; | |
1162 | } | |
1163 | ||
d6c10f6b CK |
1164 | if (trace_amdgpu_vm_bo_mapping_enabled()) { |
1165 | list_for_each_entry(mapping, &bo_va->valids, list) | |
1166 | trace_amdgpu_vm_bo_mapping(mapping); | |
1167 | ||
1168 | list_for_each_entry(mapping, &bo_va->invalids, list) | |
1169 | trace_amdgpu_vm_bo_mapping(mapping); | |
1170 | } | |
1171 | ||
d38ceaf9 | 1172 | spin_lock(&vm->status_lock); |
6d1d0ef7 | 1173 | list_splice_init(&bo_va->invalids, &bo_va->valids); |
d38ceaf9 | 1174 | list_del_init(&bo_va->vm_status); |
99e124f4 | 1175 | if (clear) |
7fc11959 | 1176 | list_add(&bo_va->vm_status, &vm->cleared); |
d38ceaf9 AD |
1177 | spin_unlock(&vm->status_lock); |
1178 | ||
1179 | return 0; | |
1180 | } | |
1181 | ||
284710fa CK |
1182 | /** |
1183 | * amdgpu_vm_update_prt_state - update the global PRT state | |
1184 | */ | |
1185 | static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev) | |
1186 | { | |
1187 | unsigned long flags; | |
1188 | bool enable; | |
1189 | ||
1190 | spin_lock_irqsave(&adev->vm_manager.prt_lock, flags); | |
1191 | enable = !!atomic_read(&adev->vm_manager.num_prt_mappings); | |
1192 | adev->gart.gart_funcs->set_prt(adev, enable); | |
1193 | spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags); | |
1194 | } | |
1195 | ||
0b15f2fc CK |
1196 | /** |
1197 | * amdgpu_vm_prt_put - drop a PRT user | |
1198 | */ | |
1199 | static void amdgpu_vm_prt_put(struct amdgpu_device *adev) | |
1200 | { | |
1201 | if (atomic_dec_return(&adev->vm_manager.num_prt_mappings) == 0) | |
1202 | amdgpu_vm_update_prt_state(adev); | |
1203 | } | |
1204 | ||
284710fa CK |
1205 | /** |
1206 | * amdgpu_vm_prt_cb - callback for updating the PRT status | |
1207 | */ | |
1208 | static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb) | |
1209 | { | |
1210 | struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb); | |
1211 | ||
0b15f2fc | 1212 | amdgpu_vm_prt_put(cb->adev); |
284710fa CK |
1213 | kfree(cb); |
1214 | } | |
1215 | ||
1216 | /** | |
1217 | * amdgpu_vm_free_mapping - free a mapping | |
1218 | * | |
1219 | * @adev: amdgpu_device pointer | |
1220 | * @vm: requested vm | |
1221 | * @mapping: mapping to be freed | |
1222 | * @fence: fence of the unmap operation | |
1223 | * | |
1224 | * Free a mapping and make sure we decrease the PRT usage count if applicable. | |
1225 | */ | |
1226 | static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, | |
1227 | struct amdgpu_vm *vm, | |
1228 | struct amdgpu_bo_va_mapping *mapping, | |
1229 | struct dma_fence *fence) | |
1230 | { | |
1b04e412 | 1231 | if (mapping->flags & AMDGPU_PTE_PRT) { |
284710fa CK |
1232 | struct amdgpu_prt_cb *cb = kmalloc(sizeof(struct amdgpu_prt_cb), |
1233 | GFP_KERNEL); | |
1234 | ||
0b15f2fc CK |
1235 | if (!cb) { |
1236 | /* Last resort when we are OOM */ | |
1237 | if (fence) | |
1238 | dma_fence_wait(fence, false); | |
1239 | ||
1240 | amdgpu_vm_prt_put(adev); | |
1241 | } else { | |
1242 | cb->adev = adev; | |
1243 | if (!fence || dma_fence_add_callback(fence, &cb->cb, | |
1244 | amdgpu_vm_prt_cb)) | |
1245 | amdgpu_vm_prt_cb(fence, &cb->cb); | |
1246 | } | |
284710fa CK |
1247 | } |
1248 | kfree(mapping); | |
1249 | } | |
1250 | ||
d38ceaf9 AD |
1251 | /** |
1252 | * amdgpu_vm_clear_freed - clear freed BOs in the PT | |
1253 | * | |
1254 | * @adev: amdgpu_device pointer | |
1255 | * @vm: requested vm | |
1256 | * | |
1257 | * Make sure all freed BOs are cleared in the PT. | |
1258 | * Returns 0 for success. | |
1259 | * | |
1260 | * PTs have to be reserved and mutex must be locked! | |
1261 | */ | |
1262 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | |
1263 | struct amdgpu_vm *vm) | |
1264 | { | |
1265 | struct amdgpu_bo_va_mapping *mapping; | |
284710fa | 1266 | struct dma_fence *fence = NULL; |
d38ceaf9 AD |
1267 | int r; |
1268 | ||
1269 | while (!list_empty(&vm->freed)) { | |
1270 | mapping = list_first_entry(&vm->freed, | |
1271 | struct amdgpu_bo_va_mapping, list); | |
1272 | list_del(&mapping->list); | |
e17841b9 | 1273 | |
3cabaa54 | 1274 | r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping, |
284710fa CK |
1275 | 0, 0, &fence); |
1276 | amdgpu_vm_free_mapping(adev, vm, mapping, fence); | |
1277 | if (r) { | |
1278 | dma_fence_put(fence); | |
d38ceaf9 | 1279 | return r; |
284710fa | 1280 | } |
d38ceaf9 AD |
1281 | |
1282 | } | |
284710fa | 1283 | dma_fence_put(fence); |
d38ceaf9 AD |
1284 | return 0; |
1285 | ||
1286 | } | |
1287 | ||
1288 | /** | |
1289 | * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT | |
1290 | * | |
1291 | * @adev: amdgpu_device pointer | |
1292 | * @vm: requested vm | |
1293 | * | |
1294 | * Make sure all invalidated BOs are cleared in the PT. | |
1295 | * Returns 0 for success. | |
1296 | * | |
1297 | * PTs have to be reserved and mutex must be locked! | |
1298 | */ | |
1299 | int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, | |
cfe2c978 | 1300 | struct amdgpu_vm *vm, struct amdgpu_sync *sync) |
d38ceaf9 | 1301 | { |
cfe2c978 | 1302 | struct amdgpu_bo_va *bo_va = NULL; |
91e1a520 | 1303 | int r = 0; |
d38ceaf9 AD |
1304 | |
1305 | spin_lock(&vm->status_lock); | |
1306 | while (!list_empty(&vm->invalidated)) { | |
1307 | bo_va = list_first_entry(&vm->invalidated, | |
1308 | struct amdgpu_bo_va, vm_status); | |
1309 | spin_unlock(&vm->status_lock); | |
32b41ac2 | 1310 | |
99e124f4 | 1311 | r = amdgpu_vm_bo_update(adev, bo_va, true); |
d38ceaf9 AD |
1312 | if (r) |
1313 | return r; | |
1314 | ||
1315 | spin_lock(&vm->status_lock); | |
1316 | } | |
1317 | spin_unlock(&vm->status_lock); | |
1318 | ||
cfe2c978 | 1319 | if (bo_va) |
bb1e38a4 | 1320 | r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update); |
91e1a520 CK |
1321 | |
1322 | return r; | |
d38ceaf9 AD |
1323 | } |
1324 | ||
1325 | /** | |
1326 | * amdgpu_vm_bo_add - add a bo to a specific vm | |
1327 | * | |
1328 | * @adev: amdgpu_device pointer | |
1329 | * @vm: requested vm | |
1330 | * @bo: amdgpu buffer object | |
1331 | * | |
8843dbbb | 1332 | * Add @bo into the requested vm. |
d38ceaf9 AD |
1333 | * Add @bo to the list of bos associated with the vm |
1334 | * Returns newly added bo_va or NULL for failure | |
1335 | * | |
1336 | * Object has to be reserved! | |
1337 | */ | |
1338 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | |
1339 | struct amdgpu_vm *vm, | |
1340 | struct amdgpu_bo *bo) | |
1341 | { | |
1342 | struct amdgpu_bo_va *bo_va; | |
1343 | ||
1344 | bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL); | |
1345 | if (bo_va == NULL) { | |
1346 | return NULL; | |
1347 | } | |
1348 | bo_va->vm = vm; | |
1349 | bo_va->bo = bo; | |
d38ceaf9 AD |
1350 | bo_va->ref_count = 1; |
1351 | INIT_LIST_HEAD(&bo_va->bo_list); | |
7fc11959 CK |
1352 | INIT_LIST_HEAD(&bo_va->valids); |
1353 | INIT_LIST_HEAD(&bo_va->invalids); | |
d38ceaf9 | 1354 | INIT_LIST_HEAD(&bo_va->vm_status); |
32b41ac2 | 1355 | |
a5f6b5b1 CK |
1356 | if (bo) |
1357 | list_add_tail(&bo_va->bo_list, &bo->va); | |
d38ceaf9 AD |
1358 | |
1359 | return bo_va; | |
1360 | } | |
1361 | ||
1362 | /** | |
1363 | * amdgpu_vm_bo_map - map bo inside a vm | |
1364 | * | |
1365 | * @adev: amdgpu_device pointer | |
1366 | * @bo_va: bo_va to store the address | |
1367 | * @saddr: where to map the BO | |
1368 | * @offset: requested offset in the BO | |
1369 | * @flags: attributes of pages (read/write/valid/etc.) | |
1370 | * | |
1371 | * Add a mapping of the BO at the specified addr into the VM. | |
1372 | * Returns 0 for success, error for failure. | |
1373 | * | |
49b02b18 | 1374 | * Object has to be reserved and unreserved outside! |
d38ceaf9 AD |
1375 | */ |
1376 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |
1377 | struct amdgpu_bo_va *bo_va, | |
1378 | uint64_t saddr, uint64_t offset, | |
268c3001 | 1379 | uint64_t size, uint64_t flags) |
d38ceaf9 AD |
1380 | { |
1381 | struct amdgpu_bo_va_mapping *mapping; | |
1382 | struct amdgpu_vm *vm = bo_va->vm; | |
1383 | struct interval_tree_node *it; | |
1384 | unsigned last_pfn, pt_idx; | |
1385 | uint64_t eaddr; | |
1386 | int r; | |
1387 | ||
0be52de9 CK |
1388 | /* validate the parameters */ |
1389 | if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || | |
49b02b18 | 1390 | size == 0 || size & AMDGPU_GPU_PAGE_MASK) |
0be52de9 | 1391 | return -EINVAL; |
0be52de9 | 1392 | |
284710fa CK |
1393 | if (flags & AMDGPU_PTE_PRT) { |
1394 | /* Check if we have PRT hardware support */ | |
1395 | if (!adev->gart.gart_funcs->set_prt) | |
1396 | return -EINVAL; | |
1397 | ||
1398 | if (atomic_inc_return(&adev->vm_manager.num_prt_mappings) == 1) | |
1399 | amdgpu_vm_update_prt_state(adev); | |
1400 | } | |
1401 | ||
d38ceaf9 | 1402 | /* make sure object fit at this offset */ |
005ae95e | 1403 | eaddr = saddr + size - 1; |
a5f6b5b1 CK |
1404 | if (saddr >= eaddr || |
1405 | (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo))) | |
d38ceaf9 | 1406 | return -EINVAL; |
d38ceaf9 AD |
1407 | |
1408 | last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; | |
005ae95e FK |
1409 | if (last_pfn >= adev->vm_manager.max_pfn) { |
1410 | dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n", | |
d38ceaf9 | 1411 | last_pfn, adev->vm_manager.max_pfn); |
d38ceaf9 AD |
1412 | return -EINVAL; |
1413 | } | |
1414 | ||
d38ceaf9 AD |
1415 | saddr /= AMDGPU_GPU_PAGE_SIZE; |
1416 | eaddr /= AMDGPU_GPU_PAGE_SIZE; | |
1417 | ||
005ae95e | 1418 | it = interval_tree_iter_first(&vm->va, saddr, eaddr); |
d38ceaf9 AD |
1419 | if (it) { |
1420 | struct amdgpu_bo_va_mapping *tmp; | |
1421 | tmp = container_of(it, struct amdgpu_bo_va_mapping, it); | |
1422 | /* bo and tmp overlap, invalid addr */ | |
1423 | dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " | |
1424 | "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, | |
1425 | tmp->it.start, tmp->it.last + 1); | |
d38ceaf9 | 1426 | r = -EINVAL; |
f48b2659 | 1427 | goto error; |
d38ceaf9 AD |
1428 | } |
1429 | ||
1430 | mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); | |
1431 | if (!mapping) { | |
d38ceaf9 | 1432 | r = -ENOMEM; |
f48b2659 | 1433 | goto error; |
d38ceaf9 AD |
1434 | } |
1435 | ||
1436 | INIT_LIST_HEAD(&mapping->list); | |
1437 | mapping->it.start = saddr; | |
005ae95e | 1438 | mapping->it.last = eaddr; |
d38ceaf9 AD |
1439 | mapping->offset = offset; |
1440 | mapping->flags = flags; | |
1441 | ||
7fc11959 | 1442 | list_add(&mapping->list, &bo_va->invalids); |
d38ceaf9 AD |
1443 | interval_tree_insert(&mapping->it, &vm->va); |
1444 | ||
1445 | /* Make sure the page tables are allocated */ | |
1446 | saddr >>= amdgpu_vm_block_size; | |
1447 | eaddr >>= amdgpu_vm_block_size; | |
1448 | ||
1449 | BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev)); | |
1450 | ||
1451 | if (eaddr > vm->max_pde_used) | |
1452 | vm->max_pde_used = eaddr; | |
1453 | ||
d38ceaf9 AD |
1454 | /* walk over the address space and allocate the page tables */ |
1455 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { | |
bf60efd3 | 1456 | struct reservation_object *resv = vm->page_directory->tbo.resv; |
d38ceaf9 AD |
1457 | struct amdgpu_bo *pt; |
1458 | ||
914b4dce | 1459 | if (vm->page_tables[pt_idx].bo) |
d38ceaf9 AD |
1460 | continue; |
1461 | ||
d38ceaf9 AD |
1462 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, |
1463 | AMDGPU_GPU_PAGE_SIZE, true, | |
857d913d | 1464 | AMDGPU_GEM_DOMAIN_VRAM, |
1baa439f | 1465 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | |
03f48dd5 | 1466 | AMDGPU_GEM_CREATE_SHADOW | |
617859e0 CK |
1467 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | |
1468 | AMDGPU_GEM_CREATE_VRAM_CLEARED, | |
bf60efd3 | 1469 | NULL, resv, &pt); |
49b02b18 | 1470 | if (r) |
d38ceaf9 | 1471 | goto error_free; |
49b02b18 | 1472 | |
82b9c55b CK |
1473 | /* Keep a reference to the page directory so that it cannot be |
1474 | * freed before its page tables. |
1475 | */ | |
1476 | pt->parent = amdgpu_bo_ref(vm->page_directory); | |
1477 | ||
914b4dce | 1478 | vm->page_tables[pt_idx].bo = pt; |
d38ceaf9 | 1479 | vm->page_tables[pt_idx].addr = 0; |
d38ceaf9 AD |
1480 | } |
1481 | ||
d38ceaf9 AD |
1482 | return 0; |
1483 | ||
1484 | error_free: | |
d38ceaf9 AD |
1485 | list_del(&mapping->list); |
1486 | interval_tree_remove(&mapping->it, &vm->va); | |
93e3e438 | 1487 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
284710fa | 1488 | amdgpu_vm_free_mapping(adev, vm, mapping, NULL); |
d38ceaf9 | 1489 | |
f48b2659 | 1490 | error: |
d38ceaf9 AD |
1491 | return r; |
1492 | } | |
1493 | ||
1494 | /** | |
1495 | * amdgpu_vm_bo_unmap - remove bo mapping from vm | |
1496 | * | |
1497 | * @adev: amdgpu_device pointer | |
1498 | * @bo_va: bo_va to remove the address from | |
1499 | @saddr: where the BO is mapped |
1500 | * | |
1501 | * Remove a mapping of the BO at the specified addr from the VM. |
1502 | * Returns 0 for success, error for failure. | |
1503 | * | |
49b02b18 | 1504 | * Object has to be reserved and unreserved outside! |
d38ceaf9 AD |
1505 | */ |
1506 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | |
1507 | struct amdgpu_bo_va *bo_va, | |
1508 | uint64_t saddr) | |
1509 | { | |
1510 | struct amdgpu_bo_va_mapping *mapping; | |
1511 | struct amdgpu_vm *vm = bo_va->vm; | |
7fc11959 | 1512 | bool valid = true; |
d38ceaf9 | 1513 | |
6c7fc503 | 1514 | saddr /= AMDGPU_GPU_PAGE_SIZE; |
32b41ac2 | 1515 | |
7fc11959 | 1516 | list_for_each_entry(mapping, &bo_va->valids, list) { |
d38ceaf9 AD |
1517 | if (mapping->it.start == saddr) |
1518 | break; | |
1519 | } | |
1520 | ||
7fc11959 CK |
1521 | if (&mapping->list == &bo_va->valids) { |
1522 | valid = false; | |
1523 | ||
1524 | list_for_each_entry(mapping, &bo_va->invalids, list) { | |
1525 | if (mapping->it.start == saddr) | |
1526 | break; | |
1527 | } | |
1528 | ||
32b41ac2 | 1529 | if (&mapping->list == &bo_va->invalids) |
7fc11959 | 1530 | return -ENOENT; |
d38ceaf9 | 1531 | } |
32b41ac2 | 1532 | |
d38ceaf9 AD |
1533 | list_del(&mapping->list); |
1534 | interval_tree_remove(&mapping->it, &vm->va); | |
93e3e438 | 1535 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
d38ceaf9 | 1536 | |
e17841b9 | 1537 | if (valid) |
d38ceaf9 | 1538 | list_add(&mapping->list, &vm->freed); |
e17841b9 | 1539 | else |
284710fa CK |
1540 | amdgpu_vm_free_mapping(adev, vm, mapping, |
1541 | bo_va->last_pt_update); | |
d38ceaf9 AD |
1542 | |
1543 | return 0; | |
1544 | } | |
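
/* Illustrative sketch of a caller, assuming the bo_va is backed by a real
 * BO and that reserving just that BO is enough in this context; the driver
 * itself reserves the BO together with the VM page directory through its
 * validation list. example_unmap_one() is hypothetical.
 */
static int example_unmap_one(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va, uint64_t gpu_va)
{
	int r;

	r = amdgpu_bo_reserve(bo_va->bo, false);
	if (r)
		return r;

	/* -ENOENT means no mapping of this bo_va starts at gpu_va */
	r = amdgpu_vm_bo_unmap(adev, bo_va, gpu_va);

	amdgpu_bo_unreserve(bo_va->bo);
	return r;
}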
1545 | ||
1546 | /** | |
1547 | * amdgpu_vm_bo_rmv - remove a bo from a specific vm |
1548 | * | |
1549 | * @adev: amdgpu_device pointer | |
1550 | * @bo_va: requested bo_va | |
1551 | * | |
8843dbbb | 1552 | * Remove @bo_va->bo from the requested vm. |
d38ceaf9 AD |
1553 | * |
1554 | * Object has to be reserved! |
1555 | */ | |
1556 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | |
1557 | struct amdgpu_bo_va *bo_va) | |
1558 | { | |
1559 | struct amdgpu_bo_va_mapping *mapping, *next; | |
1560 | struct amdgpu_vm *vm = bo_va->vm; | |
1561 | ||
1562 | list_del(&bo_va->bo_list); | |
1563 | ||
d38ceaf9 AD |
1564 | spin_lock(&vm->status_lock); |
1565 | list_del(&bo_va->vm_status); | |
1566 | spin_unlock(&vm->status_lock); | |
1567 | ||
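/* Mappings that made it into the page tables (valids) go onto vm->freed
 * so their PTEs are cleared later by amdgpu_vm_clear_freed(); mappings
 * that never did (invalids) can be freed right away.
 */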
7fc11959 | 1568 | list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { |
d38ceaf9 AD |
1569 | list_del(&mapping->list); |
1570 | interval_tree_remove(&mapping->it, &vm->va); | |
93e3e438 | 1571 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
7fc11959 CK |
1572 | list_add(&mapping->list, &vm->freed); |
1573 | } | |
1574 | list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { | |
1575 | list_del(&mapping->list); | |
1576 | interval_tree_remove(&mapping->it, &vm->va); | |
284710fa CK |
1577 | amdgpu_vm_free_mapping(adev, vm, mapping, |
1578 | bo_va->last_pt_update); | |
d38ceaf9 | 1579 | } |
32b41ac2 | 1580 | |
f54d1867 | 1581 | dma_fence_put(bo_va->last_pt_update); |
d38ceaf9 | 1582 | kfree(bo_va); |
d38ceaf9 AD |
1583 | } |
1584 | ||
1585 | /** | |
1586 | * amdgpu_vm_bo_invalidate - mark the bo as invalid | |
1587 | * | |
1588 | * @adev: amdgpu_device pointer | |
1589 | * @vm: requested vm | |
1590 | * @bo: amdgpu buffer object | |
1591 | * | |
8843dbbb | 1592 | * Mark @bo as invalid. |
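 * Typically called when the backing storage of @bo has moved, so every VM
 * that maps the BO knows its page table entries have to be rebuilt.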
d38ceaf9 AD |
1593 | */ |
1594 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | |
1595 | struct amdgpu_bo *bo) | |
1596 | { | |
1597 | struct amdgpu_bo_va *bo_va; | |
1598 | ||
1599 | list_for_each_entry(bo_va, &bo->va, bo_list) { | |
7fc11959 CK |
1600 | spin_lock(&bo_va->vm->status_lock); |
1601 | if (list_empty(&bo_va->vm_status)) | |
d38ceaf9 | 1602 | list_add(&bo_va->vm_status, &bo_va->vm->invalidated); |
7fc11959 | 1603 | spin_unlock(&bo_va->vm->status_lock); |
d38ceaf9 AD |
1604 | } |
1605 | } | |
1606 | ||
1607 | /** | |
1608 | * amdgpu_vm_init - initialize a vm instance | |
1609 | * | |
1610 | * @adev: amdgpu_device pointer | |
1611 | * @vm: requested vm | |
1612 | * | |
8843dbbb | 1613 | * Init @vm fields. |
d38ceaf9 AD |
1614 | */ |
1615 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |
1616 | { | |
1617 | const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, | |
1618 | AMDGPU_VM_PTE_COUNT * 8); | |
9571e1d8 | 1619 | unsigned pd_size, pd_entries; |
2d55e45a CK |
1620 | unsigned ring_instance; |
1621 | struct amdgpu_ring *ring; | |
2bd9ccfa | 1622 | struct amd_sched_rq *rq; |
d38ceaf9 AD |
1623 | int i, r; |
1624 | ||
bcb1ba35 CK |
1625 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) |
1626 | vm->ids[i] = NULL; | |
d38ceaf9 | 1627 | vm->va = RB_ROOT; |
031e2983 | 1628 | vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); |
d38ceaf9 AD |
1629 | spin_lock_init(&vm->status_lock); |
1630 | INIT_LIST_HEAD(&vm->invalidated); | |
7fc11959 | 1631 | INIT_LIST_HEAD(&vm->cleared); |
d38ceaf9 | 1632 | INIT_LIST_HEAD(&vm->freed); |
20250215 | 1633 | |
d38ceaf9 AD |
1634 | pd_size = amdgpu_vm_directory_size(adev); |
1635 | pd_entries = amdgpu_vm_num_pdes(adev); | |
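/* In rough terms (an assumption based on the helpers' names): pd_entries
 * is one page directory entry per page table, i.e. max_pfn shifted down
 * by the block size, and pd_size is about pd_entries * 8 bytes rounded
 * up, so a single BO can hold the whole directory.
 */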
1636 | ||
1637 | /* allocate page table array */ | |
9571e1d8 | 1638 | vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt)); |
d38ceaf9 AD |
1639 | if (vm->page_tables == NULL) { |
1640 | DRM_ERROR("Cannot allocate memory for page table array\n"); | |
1641 | return -ENOMEM; | |
1642 | } | |
1643 | ||
2bd9ccfa | 1644 | /* create scheduler entity for page table updates */ |
2d55e45a CK |
1645 | |
1646 | ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring); | |
1647 | ring_instance %= adev->vm_manager.vm_pte_num_rings; | |
1648 | ring = adev->vm_manager.vm_pte_rings[ring_instance]; | |
2bd9ccfa CK |
1649 | rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; |
1650 | r = amd_sched_entity_init(&ring->sched, &vm->entity, | |
1651 | rq, amdgpu_sched_jobs); | |
1652 | if (r) | |
64827adc | 1653 | goto err; |
2bd9ccfa | 1654 | |
05906dec BN |
1655 | vm->page_directory_fence = NULL; |
1656 | ||
d38ceaf9 | 1657 | r = amdgpu_bo_create(adev, pd_size, align, true, |
857d913d | 1658 | AMDGPU_GEM_DOMAIN_VRAM, |
1baa439f | 1659 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | |
03f48dd5 | 1660 | AMDGPU_GEM_CREATE_SHADOW | |
617859e0 CK |
1661 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | |
1662 | AMDGPU_GEM_CREATE_VRAM_CLEARED, | |
72d7668b | 1663 | NULL, NULL, &vm->page_directory); |
d38ceaf9 | 1664 | if (r) |
2bd9ccfa CK |
1665 | goto error_free_sched_entity; |
1666 | ||
ef9f0a83 | 1667 | r = amdgpu_bo_reserve(vm->page_directory, false); |
2bd9ccfa CK |
1668 | if (r) |
1669 | goto error_free_page_directory; | |
1670 | ||
5a712a87 | 1671 | vm->last_eviction_counter = atomic64_read(&adev->num_evictions); |
2a82ec21 | 1672 | amdgpu_bo_unreserve(vm->page_directory); |
d38ceaf9 AD |
1673 | |
1674 | return 0; | |
2bd9ccfa CK |
1675 | |
1676 | error_free_page_directory: | |
2698f620 | 1677 | amdgpu_bo_unref(&vm->page_directory->shadow); |
2bd9ccfa CK |
1678 | amdgpu_bo_unref(&vm->page_directory); |
1679 | vm->page_directory = NULL; | |
1680 | ||
1681 | error_free_sched_entity: | |
1682 | amd_sched_entity_fini(&ring->sched, &vm->entity); | |
1683 | ||
64827adc CZ |
1684 | err: |
1685 | drm_free_large(vm->page_tables); | |
1686 | ||
2bd9ccfa | 1687 | return r; |
d38ceaf9 AD |
1688 | } |
1689 | ||
1690 | /** | |
1691 | * amdgpu_vm_fini - tear down a vm instance | |
1692 | * | |
1693 | * @adev: amdgpu_device pointer | |
1694 | * @vm: requested vm | |
1695 | * | |
8843dbbb | 1696 | * Tear down @vm. |
d38ceaf9 AD |
1697 | * Unbind the VM and remove all bos from the vm bo list |
1698 | */ | |
1699 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |
1700 | { | |
1701 | struct amdgpu_bo_va_mapping *mapping, *tmp; | |
1702 | int i; | |
1703 | ||
2d55e45a | 1704 | amd_sched_entity_fini(vm->entity.sched, &vm->entity); |
2bd9ccfa | 1705 | |
d38ceaf9 AD |
1706 | if (!RB_EMPTY_ROOT(&vm->va)) { |
1707 | dev_err(adev->dev, "still active bo inside vm\n"); | |
1708 | } | |
1709 | rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) { | |
1710 | list_del(&mapping->list); | |
1711 | interval_tree_remove(&mapping->it, &vm->va); | |
1712 | kfree(mapping); | |
1713 | } | |
1714 | list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { | |
284710fa CK |
1715 | if (mapping->flags & AMDGPU_PTE_PRT) |
1716 | continue; | |
1717 | ||
d38ceaf9 AD |
1718 | list_del(&mapping->list); |
1719 | kfree(mapping); | |
1720 | } | |
284710fa | 1721 | amdgpu_vm_clear_freed(adev, vm); |
d38ceaf9 | 1722 | |
1baa439f | 1723 | for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) { |
914b4dce | 1724 | struct amdgpu_bo *pt = vm->page_tables[i].bo; |
2698f620 CK |
1725 | |
1726 | if (!pt) | |
1727 | continue; | |
1728 | ||
1729 | amdgpu_bo_unref(&pt->shadow); | |
1730 | amdgpu_bo_unref(&pt); | |
1baa439f | 1731 | } |
9571e1d8 | 1732 | drm_free_large(vm->page_tables); |
d38ceaf9 | 1733 | |
2698f620 | 1734 | amdgpu_bo_unref(&vm->page_directory->shadow); |
d38ceaf9 | 1735 | amdgpu_bo_unref(&vm->page_directory); |
f54d1867 | 1736 | dma_fence_put(vm->page_directory_fence); |
d38ceaf9 | 1737 | } |
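
/* Minimal lifecycle sketch (illustrative only; in the driver this is
 * driven from the per-file open and close paths): each user VM is set up
 * with amdgpu_vm_init() and torn down with amdgpu_vm_fini() once all of
 * its bo_vas are gone. example_vm_lifetime() is hypothetical.
 */
static int example_vm_lifetime(struct amdgpu_device *adev)
{
	struct amdgpu_vm vm;
	int r;

	r = amdgpu_vm_init(adev, &vm);
	if (r)
		return r;

	/* ... map buffers, submit command buffers, unmap again ... */

	amdgpu_vm_fini(adev, &vm);
	return 0;
}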
ea89f8c9 | 1738 | |
a9a78b32 CK |
1739 | /** |
1740 | * amdgpu_vm_manager_init - init the VM manager | |
1741 | * | |
1742 | * @adev: amdgpu_device pointer | |
1743 | * | |
1744 | * Initialize the VM manager structures | |
1745 | */ | |
1746 | void amdgpu_vm_manager_init(struct amdgpu_device *adev) | |
1747 | { | |
1748 | unsigned i; | |
1749 | ||
1750 | INIT_LIST_HEAD(&adev->vm_manager.ids_lru); | |
1751 | ||
1752 | /* skip over VMID 0, since it is the system VM */ | |
971fe9a9 CK |
1753 | for (i = 1; i < adev->vm_manager.num_ids; ++i) { |
1754 | amdgpu_vm_reset_id(adev, i); | |
832a902f | 1755 | amdgpu_sync_create(&adev->vm_manager.ids[i].active); |
a9a78b32 CK |
1756 | list_add_tail(&adev->vm_manager.ids[i].list, |
1757 | &adev->vm_manager.ids_lru); | |
971fe9a9 | 1758 | } |
2d55e45a | 1759 | |
f54d1867 CW |
1760 | adev->vm_manager.fence_context = |
1761 | dma_fence_context_alloc(AMDGPU_MAX_RINGS); | |
1fbb2e92 CK |
1762 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) |
1763 | adev->vm_manager.seqno[i] = 0; | |
1764 | ||
2d55e45a | 1765 | atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); |
b1c8a81f | 1766 | atomic64_set(&adev->vm_manager.client_counter, 0); |
284710fa CK |
1767 | spin_lock_init(&adev->vm_manager.prt_lock); |
1768 | atomic_set(&adev->vm_manager.num_prt_mappings, 0); | |
a9a78b32 CK |
1769 | } |
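
/* Expected pairing at the device level (a sketch of the ordering, not a
 * literal code path): amdgpu_vm_manager_init() runs once per device
 * before any per-process VM exists, and amdgpu_vm_manager_fini() runs
 * after the last amdgpu_vm_fini().
 */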
1770 | ||
ea89f8c9 CK |
1771 | /** |
1772 | * amdgpu_vm_manager_fini - cleanup VM manager | |
1773 | * | |
1774 | * @adev: amdgpu_device pointer | |
1775 | * | |
1776 | * Cleanup the VM manager and free resources. | |
1777 | */ | |
1778 | void amdgpu_vm_manager_fini(struct amdgpu_device *adev) | |
1779 | { | |
1780 | unsigned i; | |
1781 | ||
bcb1ba35 CK |
1782 | for (i = 0; i < AMDGPU_NUM_VM; ++i) { |
1783 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[i]; | |
1784 | ||
f54d1867 | 1785 | dma_fence_put(adev->vm_manager.ids[i].first); |
832a902f | 1786 | amdgpu_sync_free(&adev->vm_manager.ids[i].active); |
f54d1867 | 1787 | dma_fence_put(id->flushed_updates); |
7b624ad8 | 1788 | dma_fence_put(id->last_flush); |
bcb1ba35 | 1789 | } |
ea89f8c9 | 1790 | } |