/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and the system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
/* Special value indicating that no flush is necessary */
#define AMDGPU_VM_NO_FLUSH (~0ll)
/* Local structure. Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_vm_update_params {
	/* address where to copy page table entries from */
	uint64_t src;
	/* DMA addresses to use for mapping */
	dma_addr_t *pages_addr;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
};
/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries.
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}
/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes.
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->page_directory;
	entry->tv.bo = &vm->page_directory->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}
/**
 * amdgpu_vm_get_pt_bos - add the vm PT BOs to a duplicates list
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @duplicates: head of duplicates list
 *
 * Add the page tables to the BO duplicates list
 * for command submission.
 */
void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct list_head *duplicates)
{
	uint64_t num_evictions;
	unsigned i;

	/* We only need to validate the page tables
	 * if they aren't already valid.
	 */
	num_evictions = atomic64_read(&adev->num_evictions);
	if (num_evictions == vm->last_eviction_counter)
		return;

	/* add the vm page table to the list */
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		list_add(&entry->tv.head, duplicates);
	}
}
/**
 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 *
 * @adev: amdgpu device instance
 * @vm: vm providing the BOs
 *
 * Move the PT BOs to the tail of the LRU.
 */
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
	}
	spin_unlock(&glob->lru_lock);
}
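
/**
 * amdgpu_vm_is_gpu_reset - check if a GPU reset happened since the VMID was used
 *
 * @adev: amdgpu_device pointer
 * @id: VMID state to check
 *
 * Returns true if the GPU reset counter has advanced past the value saved in
 * @id, meaning the saved VMID state is stale and a flush is required.
 */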
static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
				   struct amdgpu_vm_id *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}
/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence,
		      struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct fence *updates = sync->last_vm_update;
	struct amdgpu_vm_id *id, *idle;
	struct fence **fences;
	unsigned i;
	int r;

	fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
			       GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	mutex_lock(&adev->vm_manager.lock);

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&idle->list == &adev->vm_manager.ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct fence_array *array;
		unsigned j;

		for (j = 0; j < i; ++j)
			fence_get(fences[j]);

		array = fence_array_create(i, fences, fence_context,
					   seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				fence_put(fences[j]);
			kfree(fences);
			r = -ENOMEM;
			goto error;
		}

		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
		fence_put(&array->base);
		if (r)
			goto error;

		mutex_unlock(&adev->vm_manager.lock);
		return 0;
	}
	kfree(fences);

	job->vm_needs_flush = true;
	/* Check if we can use a VMID already assigned to this VM */
	i = ring->idx;
	do {
		struct fence *flushed;

		id = vm->ids[i++];
		if (i == AMDGPU_MAX_RINGS)
			i = 0;

		/* Check all the prerequisites to using this VMID */
		if (!id)
			continue;
		if (amdgpu_vm_is_gpu_reset(adev, id))
			continue;

		if (atomic64_read(&id->owner) != vm->client_id)
			continue;

		if (job->vm_pd_addr != id->pd_gpu_addr)
			continue;

		if (!id->last_flush)
			continue;

		if (id->last_flush->context != fence_context &&
		    !fence_is_signaled(id->last_flush))
			continue;

		flushed = id->flushed_updates;
		if (updates &&
		    (!flushed || fence_is_later(updates, flushed)))
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
		if (r)
			goto error;

		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
		vm->ids[ring->idx] = id;

		job->vm_id = id - adev->vm_manager.ids;
		job->vm_needs_flush = false;
		trace_amdgpu_vm_grab_id(vm, ring->idx, job);

		mutex_unlock(&adev->vm_manager.lock);
		return 0;

	} while (i != ring->idx);

	/* Still no ID to use? Then use the idle one found earlier */
	id = idle;

	/* Remember this submission as user of the VMID */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
	if (r)
		goto error;

	fence_put(id->first);
	id->first = fence_get(fence);

	fence_put(id->last_flush);
	id->last_flush = NULL;

	fence_put(id->flushed_updates);
	id->flushed_updates = fence_get(updates);

	id->pd_gpu_addr = job->vm_pd_addr;
	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
	atomic64_set(&id->owner, vm->client_id);
	vm->ids[ring->idx] = id;

	job->vm_id = id - adev->vm_manager.ids;
	trace_amdgpu_vm_grab_id(vm, ring->idx, job);

error:
	mutex_unlock(&adev->vm_manager.lock);
	return r;
}
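
/**
 * amdgpu_vm_ring_has_compute_vm_bug - check if a ring needs the compute VM workaround
 *
 * @ring: ring to check
 *
 * Returns true if submissions on @ring need an extra pipeline sync to work
 * around the compute VM bug, i.e. for compute rings where the firmware does
 * not already handle it (gfx8 is fixed starting with MEC firmware 673).
 */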
static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	const struct amdgpu_ip_block_version *ip_block;

	if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
		/* only compute rings */
		return false;

	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (!ip_block)
		return false;

	if (ip_block->major <= 7) {
		/* gfx7 has no workaround */
		return true;
	} else if (ip_block->major == 8) {
		if (adev->gfx.mec_fw_version >= 673)
			/* gfx8 is fixed in MEC firmware 673 */
			return false;
		else
			return true;
	}
	return false;
}
/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: job which carries the vm id and page directory address
 *
 * Emit a VM flush when it is necessary.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	int r;

	if (ring->funcs->emit_pipeline_sync && (
	    job->vm_needs_flush || gds_switch_needed ||
	    amdgpu_vm_ring_has_compute_vm_bug(ring)))
		amdgpu_ring_emit_pipeline_sync(ring);

	if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
	    amdgpu_vm_is_gpu_reset(adev, id))) {
		struct fence *fence;

		trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);

		r = amdgpu_fence_emit(ring, &fence);
		if (r)
			return r;

		mutex_lock(&adev->vm_manager.lock);
		fence_put(id->last_flush);
		id->last_flush = fence;
		mutex_unlock(&adev->vm_manager.lock);
	}

	if (gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vm_id,
					    job->gds_base, job->gds_size,
					    job->gws_base, job->gws_size,
					    job->oa_base, job->oa_size);
	}

	return 0;
}
/**
 * amdgpu_vm_reset_id - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vm_id: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
{
	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];

	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
}
/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm)
			return bo_va;
	}
	return NULL;
}
/**
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @adev: amdgpu_device pointer
 * @vm_update_params: see amdgpu_vm_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
				   struct amdgpu_vm_update_params
					*vm_update_params,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint32_t flags)
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

	if (vm_update_params->src) {
		amdgpu_vm_copy_pte(adev, vm_update_params->ib,
			pe, (vm_update_params->src + (addr >> 12) * 8), count);

	} else if (vm_update_params->pages_addr) {
		amdgpu_vm_write_pte(adev, vm_update_params->ib,
			vm_update_params->pages_addr,
			pe, addr, count, incr, flags);

	} else if (count < 3) {
		amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
				    count, incr, flags);

	} else {
		amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
				      count, incr, flags);
	}
}
/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: bo to clear
 *
 * Need to reserve bo first before calling it.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_bo *bo)
{
	struct amdgpu_ring *ring;
	struct fence *fence = NULL;
	struct amdgpu_job *job;
	struct amdgpu_vm_update_params vm_update_params;
	unsigned entries;
	uint64_t addr;
	int r;

	memset(&vm_update_params, 0, sizeof(vm_update_params));
	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto error;

	vm_update_params.ib = &job->ibs[0];
	amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
			       0, 0);
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);

	WARN_ON(job->ibs[0].length_dw > 64);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &fence);
	if (r)
		goto error_free;

	amdgpu_bo_fence(bo, fence, true);
	fence_put(fence);
	return 0;

error_free:
	amdgpu_job_free(job);

error:
	return r;
}
/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	if (pages_addr) {
		/* page table offset */
		result = pages_addr[addr >> PAGE_SHIFT];

		/* in case cpu page size != gpu page size */
		result |= addr & (~PAGE_MASK);

	} else {
		/* No mapping required */
		result = addr;
	}

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
/**
 * amdgpu_vm_update_page_directory - make sure that page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdgpu_ring *ring;
	struct amdgpu_bo *pd = vm->page_directory;
	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_job *job;
	struct amdgpu_vm_update_params vm_update_params;
	struct fence *fence = NULL;
	int r;

	memset(&vm_update_params, 0, sizeof(vm_update_params));
	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	vm_update_params.ib = &job->ibs[0];

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = amdgpu_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				amdgpu_vm_update_pages(adev, &vm_update_params,
						       last_pde, last_pt,
						       count, incr,
						       AMDGPU_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		amdgpu_vm_update_pages(adev, &vm_update_params,
				       last_pde, last_pt,
				       count, incr, AMDGPU_PTE_VALID);

	if (vm_update_params.ib->length_dw != 0) {
		amdgpu_ring_pad_ib(ring, vm_update_params.ib);
		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM);
		WARN_ON(vm_update_params.ib->length_dw > ndw);
		r = amdgpu_job_submit(job, ring, &vm->entity,
				      AMDGPU_FENCE_OWNER_VM, &fence);
		if (r)
			goto error_free;

		amdgpu_bo_fence(pd, fence, true);
		fence_put(vm->page_directory_fence);
		vm->page_directory_fence = fence_get(fence);
		fence_put(fence);

	} else {
		amdgpu_job_free(job);
	}

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
 * @vm_update_params: see amdgpu_vm_update_params definition
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
				struct amdgpu_vm_update_params
					*vm_update_params,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
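
	/* Worked example (from the description above): a fragment field of 4
	 * gives a page granularity of 1 << (12 + 4) = 64KB, i.e. 16
	 * consecutive 4KB pages, so the matching alignment of the PTE address
	 * is 16 entries * 8 bytes = 0x80, the frag_align used below.
	 */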
	/* SI and newer are optimized for 64KB */
	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
	uint64_t frag_align = 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* Abort early if there isn't anything to do */
	if (pe_start == pe_end)
		return;

	/* system pages are not contiguous */
	if (vm_update_params->src || vm_update_params->pages_addr ||
	    !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
				       addr, count, AMDGPU_GPU_PAGE_SIZE,
				       flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
				       count, AMDGPU_GPU_PAGE_SIZE, flags);
		addr += AMDGPU_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += AMDGPU_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
				       count, AMDGPU_GPU_PAGE_SIZE, flags);
	}
}
/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @adev: amdgpu_device pointer
 * @vm_update_params: see amdgpu_vm_update_params definition
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 */
static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
				  struct amdgpu_vm_update_params
					*vm_update_params,
				  struct amdgpu_vm *vm,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;

	uint64_t cur_pe_start, cur_pe_end, cur_dst;
	uint64_t addr; /* next GPU address to be updated */
	uint64_t pt_idx;
	struct amdgpu_bo *pt;
	unsigned nptes; /* next number of ptes to be updated */
	uint64_t next_pe_start;

	/* initialize the variables */
	addr = start;
	pt_idx = addr >> amdgpu_vm_block_size;
	pt = vm->page_tables[pt_idx].entry.robj;

	if ((addr & ~mask) == (end & ~mask))
		nptes = end - addr;
	else
		nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

	cur_pe_start = amdgpu_bo_gpu_offset(pt);
	cur_pe_start += (addr & mask) * 8;
	cur_pe_end = cur_pe_start + 8 * nptes;
	cur_dst = dst;

	/* for next ptb */
	addr += nptes;
	dst += nptes * AMDGPU_GPU_PAGE_SIZE;

	/* walk over the address space and update the page tables */
	while (addr < end) {
		pt_idx = addr >> amdgpu_vm_block_size;
		pt = vm->page_tables[pt_idx].entry.robj;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

		next_pe_start = amdgpu_bo_gpu_offset(pt);
		next_pe_start += (addr & mask) * 8;

		if (cur_pe_end == next_pe_start) {
			/* The next ptb is consecutive to current ptb.
			 * Don't call amdgpu_vm_frag_ptes now.
			 * Will update two ptbs together in future.
			 */
			cur_pe_end += 8 * nptes;
		} else {
			amdgpu_vm_frag_ptes(adev, vm_update_params,
					    cur_pe_start, cur_pe_end,
					    cur_dst, flags);

			cur_pe_start = next_pe_start;
			cur_pe_end = next_pe_start + 8 * nptes;
			cur_dst = dst;
		}

		/* for next ptb */
		addr += nptes;
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

	amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
			    cur_pe_end, cur_dst, flags);
}
/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @src: address where to copy page table entries from
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct fence *exclusive,
				       uint64_t src,
				       dma_addr_t *pages_addr,
				       struct amdgpu_vm *vm,
				       uint64_t start, uint64_t last,
				       uint32_t flags, uint64_t addr,
				       struct fence **fence)
{
	struct amdgpu_ring *ring;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
	struct amdgpu_job *job;
	struct amdgpu_vm_update_params vm_update_params;
	struct fence *f = NULL;
	int r;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
	memset(&vm_update_params, 0, sizeof(vm_update_params));
	vm_update_params.src = src;
	vm_update_params.pages_addr = pages_addr;

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	nptes = last - start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
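	/* For example, assuming a 9 bit block size: updating nptes = 1 << 20
	 * entries reserves (1 << 20 >> 9) + 1 = 2049 commands; the shift is
	 * capped at 11 so never less than one command per 2k entries.
	 */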
	/* padding, etc. */
	ndw = 64;

	if (vm_update_params.src) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (vm_update_params.pages_addr) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	vm_update_params.ib = &job->ibs[0];

	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
	if (r)
		goto error_free;

	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
			     owner);
	if (r)
		goto error_free;

	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
	if (r)
		goto error_free;

	amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
			      last + 1, addr, flags);

	amdgpu_ring_pad_ib(ring, vm_update_params.ib);
	WARN_ON(vm_update_params.ib->length_dw > ndw);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->page_directory, f, true);
	if (fence) {
		fence_put(*fence);
		*fence = fence_get(f);
	}
	fence_put(f);
	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @gtt_flags: flags as they are used for GTT
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into one IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct fence *exclusive,
				      uint32_t gtt_flags,
				      dma_addr_t *pages_addr,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint32_t flags, uint64_t addr,
				      struct fence **fence)
{
	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;

	uint64_t src = 0, start = mapping->it.start;
	int r;

	/* normally bo_va->flags only contains the READABLE and WRITEABLE bits
	 * here, but just in case we filter the flags first
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	trace_amdgpu_vm_bo_update(mapping);

	if (pages_addr) {
		if (flags == gtt_flags)
			src = adev->gart.table_addr + (addr >> 12) * 8;
		addr = 0;
	}
	addr += mapping->offset;

	if (!pages_addr || src)
		return amdgpu_vm_bo_update_mapping(adev, exclusive,
						   src, pages_addr, vm,
						   start, mapping->it.last,
						   flags, addr, fence);

	while (start != mapping->it.last + 1) {
		uint64_t last;

		last = min((uint64_t)mapping->it.last, start + max_size - 1);
		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
						src, pages_addr, vm,
						start, last, flags, addr,
						fence);
		if (r)
			return r;

		start = last + 1;
		addr += max_size * AMDGPU_GPU_PAGE_SIZE;
	}

	return 0;
}
/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	uint32_t gtt_flags, flags;
	struct fence *exclusive;
	uint64_t addr;
	int r;

	if (mem) {
		struct ttm_dma_tt *ttm;

		addr = (u64)mem->start << PAGE_SHIFT;
		switch (mem->mem_type) {
		case TTM_PL_TT:
			ttm = container_of(bo_va->bo->tbo.ttm, struct
					   ttm_dma_tt, ttm);
			pages_addr = ttm->dma_address;
			break;

		case TTM_PL_VRAM:
			addr += adev->vm_manager.vram_base_offset;
			break;

		default:
			break;
		}

		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
	} else {
		addr = 0;
		exclusive = NULL;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
	gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
					       gtt_flags, pages_addr, vm,
					       mapping, flags, addr,
					       &bo_va->last_pt_update);
		if (r)
			return r;
	}

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);

		list_for_each_entry(mapping, &bo_va->invalids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);
	if (!mem)
		list_add(&bo_va->vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	return 0;
}
/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
					       0, 0, NULL);
		kfree(mapping);
		if (r)
			return r;
	}
	return 0;
}
/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add fences to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}
/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm.
 * Returns the newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL)
		return NULL;

	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);
	INIT_LIST_HEAD(&bo_va->vm_status);

	list_add_tail(&bo_va->bo_list, &bo->va);

	return bo_va;
}
/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint32_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	struct interval_tree_node *it;
	unsigned last_pfn, pt_idx;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
		return -EINVAL;

	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
	if (it) {
		struct amdgpu_bo_va_mapping *tmp;
		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
			tmp->it.start, tmp->it.last + 1);
		r = -EINVAL;
		goto error;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		r = -ENOMEM;
		goto error;
	}

	INIT_LIST_HEAD(&mapping->list);
	mapping->it.start = saddr;
	mapping->it.last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->invalids);
	interval_tree_insert(&mapping->it, &vm->va);

	/* Make sure the page tables are allocated */
	saddr >>= amdgpu_vm_block_size;
	eaddr >>= amdgpu_vm_block_size;

	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

	if (eaddr > vm->max_pde_used)
		vm->max_pde_used = eaddr;

	/* walk over the address space and allocate the page tables */
	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
		struct reservation_object *resv = vm->page_directory->tbo.resv;
		struct amdgpu_bo_list_entry *entry;
		struct amdgpu_bo *pt;

		entry = &vm->page_tables[pt_idx].entry;
		if (entry->robj)
			continue;

		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
				     NULL, resv, &pt);
		if (r)
			goto error_free;

		/* Keep a reference to the page table to avoid freeing
		 * them up in the wrong order.
		 */
		pt->parent = amdgpu_bo_ref(vm->page_directory);

		r = amdgpu_vm_clear_bo(adev, vm, pt);
		if (r) {
			amdgpu_bo_unref(&pt);
			goto error_free;
		}

		entry->robj = pt;
		entry->priority = 0;
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = true;
		entry->user_pages = NULL;
		vm->page_tables[pt_idx].addr = 0;
	}

	return 0;

error_free:
	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
	kfree(mapping);

error:
	return r;
}
/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->it.start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->it.start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		kfree(mapping);

	return 0;
}
/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}

	fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}
/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}
/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries;
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		vm->ids[i] = NULL;
	vm->va = RB_ROOT;
	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);

	pd_size = amdgpu_vm_directory_size(adev);
	pd_entries = amdgpu_vm_num_pdes(adev);

	/* allocate page table array */
	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	/* create scheduler entity for page table updates */
	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs);
	if (r)
		goto err;

	vm->page_directory_fence = NULL;

	r = amdgpu_bo_create(adev, pd_size, align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
			     NULL, NULL, &vm->page_directory);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(vm->page_directory, false);
	if (r)
		goto error_free_page_directory;

	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
	amdgpu_bo_unreserve(vm->page_directory);
	if (r)
		goto error_free_page_directory;
	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);

	return 0;

error_free_page_directory:
	amdgpu_bo_unref(&vm->page_directory);
	vm->page_directory = NULL;

error_free_sched_entity:
	amd_sched_entity_fini(&ring->sched, &vm->entity);

err:
	drm_free_large(vm->page_tables);

	return r;
}
/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	int i;

	amd_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		list_del(&mapping->list);
		kfree(mapping);
	}

	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
		amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
	drm_free_large(vm->page_tables);

	amdgpu_bo_unref(&vm->page_directory);
	fence_put(vm->page_directory_fence);
}
/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
		amdgpu_vm_reset_id(adev, i);
		amdgpu_sync_create(&adev->vm_manager.ids[i].active);
		list_add_tail(&adev->vm_manager.ids[i].list,
			      &adev->vm_manager.ids_lru);
	}

	adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	atomic64_set(&adev->vm_manager.client_counter, 0);
}
/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];

		fence_put(adev->vm_manager.ids[i].first);
		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
		fence_put(id->flushed_updates);
	}
}