/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/fence-array.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_trace.h"
/*
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
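/*
 * Illustrative sketch (not part of the original code): how a GPU virtual
 * address is split across the two levels described above. The names pfn,
 * pde_idx and pte_idx are only for illustration; AMDGPU_GPU_PAGE_SHIFT and
 * amdgpu_vm_block_size are the driver symbols used below.
 *
 *	uint64_t pfn     = addr >> AMDGPU_GPU_PAGE_SHIFT;
 *	uint64_t pde_idx = pfn >> amdgpu_vm_block_size;
 *	uint64_t pte_idx = pfn & ((1ULL << amdgpu_vm_block_size) - 1);
 */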
/* Special value indicating that no flush is necessary */
#define AMDGPU_VM_NO_FLUSH (~0ll)
/* Local structure. Encapsulates some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
	/* amdgpu device we do this update for */
	struct amdgpu_device *adev;
	/* address where to copy page table entries from */
	uint64_t src;
	/* DMA addresses to use for mapping */
	dma_addr_t *pages_addr;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
};
/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries.
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}
/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes.
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}
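/*
 * Worked example (illustrative only): assuming a 40-bit VA space
 * (max_pfn = 1 << 28 with 4KB GPU pages) and a 9-bit amdgpu_vm_block_size,
 * amdgpu_vm_num_pdes() returns 1 << 19 entries and the page directory
 * occupies (1 << 19) * 8 bytes = 4MB, rounded up to GPU page alignment.
 */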
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->page_directory;
	entry->tv.bo = &vm->page_directory->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}
/**
 * amdgpu_vm_get_pt_bos - add the vm BOs to a duplicates list
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @duplicates: head of duplicates list
 *
 * Add the page table BOs to the BO duplicates list
 * for command submission.
 */
void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct list_head *duplicates)
{
	uint64_t num_evictions;
	unsigned i;

	/* We only need to validate the page tables
	 * if they aren't already valid.
	 */
	num_evictions = atomic64_read(&adev->num_evictions);
	if (num_evictions == vm->last_eviction_counter)
		return;

	/* add the vm page tables to the list */
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		list_add(&entry->tv.head, duplicates);
	}
}
/**
 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 *
 * @adev: amdgpu device instance
 * @vm: vm providing the BOs
 *
 * Move the PT BOs to the tail of the LRU.
 */
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i <= vm->max_pde_used; ++i) {
		struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;

		if (!entry->robj)
			continue;

		ttm_bo_move_to_lru_tail(&entry->robj->tbo);
	}
	spin_unlock(&glob->lru_lock);
}
static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
				   struct amdgpu_vm_id *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}
/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence,
		      struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct fence *updates = sync->last_vm_update;
	struct amdgpu_vm_id *id, *idle;
	struct fence **fences;
	unsigned i;
	int r;

	fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
			       GFP_KERNEL);
	if (!fences)
		return -ENOMEM;
	mutex_lock(&adev->vm_manager.lock);

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
		if (!fences[i])
			break;
		++i;
	}
	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&idle->list == &adev->vm_manager.ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct fence_array *array;
		unsigned j;

		for (j = 0; j < i; ++j)
			fence_get(fences[j]);

		array = fence_array_create(i, fences, fence_context,
					   seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				fence_put(fences[j]);
			kfree(fences);
			r = -ENOMEM;
			goto error;
		}

		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
		fence_put(&array->base);
		if (r)
			goto error;

		mutex_unlock(&adev->vm_manager.lock);
		return 0;

	}
	kfree(fences);
	job->vm_needs_flush = true;
	/* Check if we can use a VMID already assigned to this VM */
	i = ring->idx;
	do {
		struct fence *flushed;

		id = vm->ids[i++];
		if (i == AMDGPU_MAX_RINGS)
			i = 0;

		/* Check all the prerequisites to using this VMID */
		if (!id)
			continue;
		if (amdgpu_vm_is_gpu_reset(adev, id))
			continue;

		if (atomic64_read(&id->owner) != vm->client_id)
			continue;

		if (job->vm_pd_addr != id->pd_gpu_addr)
			continue;

		if (!id->last_flush)
			continue;

		if (id->last_flush->context != fence_context &&
		    !fence_is_signaled(id->last_flush))
			continue;

		flushed = id->flushed_updates;
		if (updates &&
		    (!flushed || fence_is_later(updates, flushed)))
			continue;

		/* Good we can use this VMID. Remember this submission as
		 * user of the id.
		 */
		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
		if (r)
			goto error;

		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
		vm->ids[ring->idx] = id;

		job->vm_id = id - adev->vm_manager.ids;
		job->vm_needs_flush = false;
		trace_amdgpu_vm_grab_id(vm, ring->idx, job);

		mutex_unlock(&adev->vm_manager.lock);
		return 0;

	} while (i != ring->idx);
	/* Still no ID to use? Then use the idle one found earlier */
	id = idle;

	/* Remember this submission as user of the VMID */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
	if (r)
		goto error;

	fence_put(id->first);
	id->first = fence_get(fence);

	fence_put(id->last_flush);
	id->last_flush = NULL;

	fence_put(id->flushed_updates);
	id->flushed_updates = fence_get(updates);

	id->pd_gpu_addr = job->vm_pd_addr;
	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
	atomic64_set(&id->owner, vm->client_id);
	vm->ids[ring->idx] = id;

	job->vm_id = id - adev->vm_manager.ids;
	trace_amdgpu_vm_grab_id(vm, ring->idx, job);

error:
	mutex_unlock(&adev->vm_manager.lock);
	return r;
}
static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	const struct amdgpu_ip_block_version *ip_block;

	if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
		/* only compute rings */
		return false;

	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (!ip_block)
		return false;

	if (ip_block->major <= 7) {
		/* gfx7 has no workaround */
		return true;
	} else if (ip_block->major == 8) {
		if (adev->gfx.mec_fw_version >= 673)
			/* gfx8 is fixed in MEC firmware 673 */
			return false;
		else
			return true;
	}
	return false;
}
/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: job containing the VM id and page directory address
 *
 * Emit a VM flush when it is necessary.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	int r;

	if (ring->funcs->emit_pipeline_sync && (
	    job->vm_needs_flush || gds_switch_needed ||
	    amdgpu_vm_ring_has_compute_vm_bug(ring)))
		amdgpu_ring_emit_pipeline_sync(ring);
	if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
	    amdgpu_vm_is_gpu_reset(adev, id))) {
		struct fence *fence;

		trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);

		r = amdgpu_fence_emit(ring, &fence);
		if (r)
			return r;

		mutex_lock(&adev->vm_manager.lock);
		fence_put(id->last_flush);
		id->last_flush = fence;
		mutex_unlock(&adev->vm_manager.lock);
	}
	if (gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vm_id,
					    job->gds_base, job->gds_size,
					    job->gws_base, job->gws_size,
					    job->oa_base, job->oa_size);
	}

	return 0;
}
/**
 * amdgpu_vm_reset_id - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vm_id: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
{
	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];

	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
}
/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}
/**
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_pte_update_params *params,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint32_t flags)
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

	if (params->src) {
		amdgpu_vm_copy_pte(params->adev, params->ib,
			pe, (params->src + (addr >> 12) * 8), count);

	} else if (params->pages_addr) {
		amdgpu_vm_write_pte(params->adev, params->ib,
			params->pages_addr,
			pe, addr, count, incr, flags);

	} else if (count < 3) {
		amdgpu_vm_write_pte(params->adev, params->ib, NULL, pe, addr,
				    count, incr, flags);

	} else {
		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
				      count, incr, flags);
	}
}
/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @vm: vm the BO belongs to
 * @bo: bo to clear
 *
 * Need to reserve the bo first before calling it.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_bo *bo)
{
	struct amdgpu_ring *ring;
	struct fence *fence = NULL;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	unsigned entries;
	uint64_t addr;
	int r;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto error;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.ib = &job->ibs[0];
	amdgpu_vm_update_pages(&params, addr, 0, entries, 0, 0);
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);

	WARN_ON(job->ibs[0].length_dw > 64);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &fence);
	if (r)
		goto error_free;

	amdgpu_bo_fence(bo, fence, true);
	fence_put(fence);
	return 0;

error_free:
	amdgpu_job_free(job);

error:
	return r;
}
/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	if (pages_addr) {
		/* page table offset */
		result = pages_addr[addr >> PAGE_SHIFT];

		/* in case cpu page size != gpu page size */
		result |= addr & (~PAGE_MASK);
	} else {
		/* No mapping required */
		result = addr;
	}

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
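/*
 * Illustrative sketch (not part of the original code), with made-up DMA
 * addresses and 4KB pages:
 *
 *	dma_addr_t pages_addr[2] = { 0x12340000, 0x56780000 };
 *
 *	amdgpu_vm_map_gart(pages_addr, 0x1000) returns 0x56780000
 *	amdgpu_vm_map_gart(NULL, 0x2000) returns 0x2000
 */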
/**
 * amdgpu_vm_update_page_directory - make sure that page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdgpu_ring *ring;
	struct amdgpu_bo *pd = vm->page_directory;
	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct fence *fence = NULL;
	int r;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.ib = &job->ibs[0];
	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = amdgpu_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				amdgpu_vm_update_pages(&params, last_pde,
						       last_pt, count, incr,
						       AMDGPU_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		amdgpu_vm_update_pages(&params, last_pde, last_pt,
				       count, incr, AMDGPU_PTE_VALID);
	if (params.ib->length_dw != 0) {
		amdgpu_ring_pad_ib(ring, params.ib);
		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM);
		WARN_ON(params.ib->length_dw > ndw);
		r = amdgpu_job_submit(job, ring, &vm->entity,
				      AMDGPU_FENCE_OWNER_VM, &fence);
		if (r)
			goto error_free;

		amdgpu_bo_fence(pd, fence, true);
		fence_put(vm->page_directory_fence);
		vm->page_directory_fence = fence_get(fence);
		fence_put(fence);

	} else {
		amdgpu_job_free(job);
	}

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 */
static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
				  struct amdgpu_vm *vm,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;

	uint64_t cur_pe_start, cur_nptes, cur_dst;
	uint64_t addr; /* next GPU address to be updated */
	uint64_t pt_idx;
	struct amdgpu_bo *pt;
	unsigned nptes; /* next number of ptes to be updated */
	uint64_t next_pe_start;

	/* initialize the variables */
	addr = start;
	pt_idx = addr >> amdgpu_vm_block_size;
	pt = vm->page_tables[pt_idx].entry.robj;

	if ((addr & ~mask) == (end & ~mask))
		nptes = end - addr;
	else
		nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

	cur_pe_start = amdgpu_bo_gpu_offset(pt);
	cur_pe_start += (addr & mask) * 8;
	cur_nptes = nptes;
	cur_dst = dst;

	/* for next ptb */
	addr += nptes;
	dst += nptes * AMDGPU_GPU_PAGE_SIZE;

	/* walk over the address space and update the page tables */
	while (addr < end) {
		pt_idx = addr >> amdgpu_vm_block_size;
		pt = vm->page_tables[pt_idx].entry.robj;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

		next_pe_start = amdgpu_bo_gpu_offset(pt);
		next_pe_start += (addr & mask) * 8;

		if ((cur_pe_start + 8 * cur_nptes) == next_pe_start) {
			/* The next ptb is consecutive to current ptb.
			 * Don't call amdgpu_vm_update_pages now.
			 * Will update two ptbs together in future.
			 */
			cur_nptes += nptes;
		} else {
			amdgpu_vm_update_pages(params, cur_pe_start, cur_dst,
					       cur_nptes, AMDGPU_GPU_PAGE_SIZE,
					       flags);

			cur_pe_start = next_pe_start;
			cur_nptes = nptes;
			cur_dst = dst;
		}

		/* for next ptb */
		addr += nptes;
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

	amdgpu_vm_update_pages(params, cur_pe_start, cur_dst, cur_nptes,
			       AMDGPU_GPU_PAGE_SIZE, flags);
}
/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @dst: addr those PTEs should point to
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
				struct amdgpu_vm *vm,
				uint64_t start, uint64_t end,
				uint64_t dst, uint32_t flags)
{
	/*
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
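	/*
	 * Worked example (illustrative only, assuming AMDGPU_LOG2_PAGES_PER_FRAG
	 * is 4, i.e. 64KB fragments): for a range whose aligned bounds are
	 * GPU pages frag_start = 0x40 and frag_end = 0x200, both multiples of
	 * 16 pages, __ffs(0x40 | 0x200) = 6, so the middle part below is
	 * mapped with AMDGPU_PTE_FRAG(6), i.e. 256KB fragments.
	 */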
	const uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;

	uint64_t frag_start = ALIGN(start, frag_align);
	uint64_t frag_end = end & ~(frag_align - 1);

	uint32_t frag;

	/* system pages are not continuous */
	if (params->src || params->pages_addr || !(flags & AMDGPU_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
		return;
	}

	/* use more than 64KB fragment size if possible */
	frag = lower_32_bits(frag_start | frag_end);
	frag = likely(frag) ? __ffs(frag) : 31;

	/* handle the 4K area at the beginning */
	if (start != frag_start) {
		amdgpu_vm_update_ptes(params, vm, start, frag_start,
				      dst, flags);
		dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
	}

	/* handle the area in the middle */
	amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
			      flags | AMDGPU_PTE_FRAG(frag));

	/* handle the 4K area at the end */
	if (frag_end != end) {
		dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
		amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
	}
}
/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @src: address where to copy page table entries from
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct fence *exclusive,
				       uint64_t src,
				       dma_addr_t *pages_addr,
				       struct amdgpu_vm *vm,
				       uint64_t start, uint64_t last,
				       uint32_t flags, uint64_t addr,
				       struct fence **fence)
{
	struct amdgpu_ring *ring;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct fence *f = NULL;
	int r;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.src = src;
	params.pages_addr = pages_addr;

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	nptes = last - start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
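	/*
	 * Worked example (illustrative only): with amdgpu_vm_block_size = 9
	 * and nptes = 4096, one command is budgeted per 512 entries, so
	 * ncmds = (4096 >> 9) + 1 = 9.
	 */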
	/* padding, etc. */
	ndw = 64;

	if (params.src) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (params.pages_addr) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	params.ib = &job->ibs[0];

	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
	if (r)
		goto error_free;

	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
			     owner);
	if (r)
		goto error_free;

	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
	if (r)
		goto error_free;

	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);

	amdgpu_ring_pad_ib(ring, params.ib);
	WARN_ON(params.ib->length_dw > ndw);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->page_directory, f, true);
	if (fence) {
		fence_put(*fence);
		*fence = fence_get(f);
	}
	fence_put(f);
	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @gtt_flags: flags as they are used for GTT
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @addr: addr to set the area to
 * @flags: HW flags for the mapping
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into our submission sizes.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct fence *exclusive,
				      uint32_t gtt_flags,
				      dma_addr_t *pages_addr,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint32_t flags, uint64_t addr,
				      struct fence **fence)
{
	const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;

	uint64_t src = 0, start = mapping->it.start;
	int r;

	/* normally bo_va->flags only contains the READABLE and WRITEABLE bits,
	 * but just in case we filter the flags here in the first place
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	trace_amdgpu_vm_bo_update(mapping);

	if (pages_addr) {
		if (flags == gtt_flags)
			src = adev->gart.table_addr + (addr >> 12) * 8;
		addr = 0;
	}
	addr += mapping->offset;

	if (!pages_addr || src)
		return amdgpu_vm_bo_update_mapping(adev, exclusive,
						   src, pages_addr, vm,
						   start, mapping->it.last,
						   flags, addr, fence);

	while (start != mapping->it.last + 1) {
		uint64_t last;

		last = min((uint64_t)mapping->it.last, start + max_size - 1);
		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
						src, pages_addr, vm,
						start, last, flags, addr,
						fence);
		if (r)
			return r;

		start = last + 1;
		addr += max_size * AMDGPU_GPU_PAGE_SIZE;
	}

	return 0;
}
/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and the mutex must be locked!
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	uint32_t gtt_flags, flags;
	struct fence *exclusive;
	uint64_t addr;
	int r;

	if (mem) {
		struct ttm_dma_tt *ttm;

		addr = (u64)mem->start << PAGE_SHIFT;
		switch (mem->mem_type) {
		case TTM_PL_TT:
			ttm = container_of(bo_va->bo->tbo.ttm, struct
					   ttm_dma_tt, ttm);
			pages_addr = ttm->dma_address;
			break;

		case TTM_PL_VRAM:
			addr += adev->vm_manager.vram_base_offset;
			break;

		default:
			break;
		}

		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
	} else {
		addr = 0;
		exclusive = NULL;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
	gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
					       gtt_flags, pages_addr, vm,
					       mapping, flags, addr,
					       &bo_va->last_pt_update);
		if (r)
			return r;
	}

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);

		list_for_each_entry(mapping, &bo_va->invalids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);
	if (!mem)
		list_add(&bo_va->vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	return 0;
}
/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and the mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
					       0, 0, NULL);
		kfree(mapping);
		if (r)
			return r;
	}
	return 0;
}
/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add the fence to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and the mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}
/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);
	INIT_LIST_HEAD(&bo_va->vm_status);

	list_add_tail(&bo_va->bo_list, &bo->va);

	return bo_va;
}
/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint32_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	struct interval_tree_node *it;
	unsigned last_pfn, pt_idx;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
		return -EINVAL;

	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
	if (it) {
		struct amdgpu_bo_va_mapping *tmp;
		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
			tmp->it.start, tmp->it.last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->list);
	mapping->it.start = saddr;
	mapping->it.last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->invalids);
	interval_tree_insert(&mapping->it, &vm->va);

	/* Make sure the page tables are allocated */
	saddr >>= amdgpu_vm_block_size;
	eaddr >>= amdgpu_vm_block_size;

	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

	if (eaddr > vm->max_pde_used)
		vm->max_pde_used = eaddr;
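	/*
	 * Worked example (illustrative only, assuming a 9-bit
	 * amdgpu_vm_block_size): mapping 4MB at VA 2MB covers GPU pages
	 * 0x200..0x5ff, i.e. page directory entries 1..2, so page tables
	 * 1 and 2 are allocated below if they do not exist yet.
	 */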
	/* walk over the address space and allocate the page tables */
	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
		struct reservation_object *resv = vm->page_directory->tbo.resv;
		struct amdgpu_bo_list_entry *entry;
		struct amdgpu_bo *pt;

		entry = &vm->page_tables[pt_idx].entry;
		if (entry->robj)
			continue;

		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
				     NULL, resv, &pt);
		if (r)
			goto error_free;

		/* Keep a reference to the page table to avoid freeing
		 * them up in the wrong order.
		 */
		pt->parent = amdgpu_bo_ref(vm->page_directory);

		r = amdgpu_vm_clear_bo(adev, vm, pt);
		if (r) {
			amdgpu_bo_unref(&pt);
			goto error_free;
		}

		entry->robj = pt;
		entry->priority = 0;
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = true;
		entry->user_pages = NULL;
		vm->page_tables[pt_idx].addr = 0;
	}

	return 0;

error_free:
	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
	kfree(mapping);

	return r;
}
/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->it.start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->it.start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		kfree(mapping);

	return 0;
}
/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}

	fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}
/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}
/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries;
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		vm->ids[i] = NULL;
	vm->va = RB_ROOT;
	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);

	pd_size = amdgpu_vm_directory_size(adev);
	pd_entries = amdgpu_vm_num_pdes(adev);

	/* allocate page table array */
	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	/* create scheduler entity for page table updates */
	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs);
	if (r)
		return r;

	vm->page_directory_fence = NULL;

	r = amdgpu_bo_create(adev, pd_size, align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
			     NULL, NULL, &vm->page_directory);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(vm->page_directory, false);
	if (r)
		goto error_free_page_directory;

	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
	amdgpu_bo_unreserve(vm->page_directory);
	if (r)
		goto error_free_page_directory;
	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);

	return 0;

error_free_page_directory:
	amdgpu_bo_unref(&vm->page_directory);
	vm->page_directory = NULL;

error_free_sched_entity:
	amd_sched_entity_fini(&ring->sched, &vm->entity);

	return r;
}
/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	int i;

	amd_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		list_del(&mapping->list);
		kfree(mapping);
	}

	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
		amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
	drm_free_large(vm->page_tables);

	amdgpu_bo_unref(&vm->page_directory);
	fence_put(vm->page_directory_fence);
}
/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures.
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
		amdgpu_vm_reset_id(adev, i);
		amdgpu_sync_create(&adev->vm_manager.ids[i].active);
		list_add_tail(&adev->vm_manager.ids[i].list,
			      &adev->vm_manager.ids_lru);
	}

	adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	atomic64_set(&adev->vm_manager.client_counter, 0);
}
/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];

		fence_put(adev->vm_manager.ids[i].first);
		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
		fence_put(id->flushed_updates);
		fence_put(id->last_flush);
	}
}