/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time.
 */
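/*
 * Illustrative sketch (not part of the driver): with the multi-level layout
 * a GPU virtual address is consumed from the top down, block_size bits per
 * level, the same way amdgpu_vm_get_pt() walks it further below.  Assuming a
 * 9-bit block size and a two-level setup purely for the example:
 *
 *	uint64_t va = gpu_addr >> AMDGPU_GPU_PAGE_SHIFT; // GPU page number
 *	unsigned pd_idx = va >> 9;			  // page directory index
 *	unsigned pt_idx = va & (512 - 1);		  // leaf page table index
 *
 * The shift values are example numbers only; the real ones depend on
 * vm_manager.block_size and vm_manager.num_level.
 */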
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)
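/*
 * The INTERVAL_TREE_DEFINE() above generates the static helpers used
 * throughout this file: amdgpu_vm_it_insert(), amdgpu_vm_it_remove(),
 * amdgpu_vm_it_iter_first() and amdgpu_vm_it_iter_next().  A hedged usage
 * sketch, mirroring the overlap check in amdgpu_vm_bo_map() below (example
 * values only):
 *
 *	struct amdgpu_bo_va_mapping *m;
 *
 *	m = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
 *	if (m)
 *		return -EINVAL; // [saddr, eaddr] overlaps an existing mapping
 */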
/* Local structure. Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
	/* amdgpu device we do this update for */
	struct amdgpu_device *adev;
	/* optional amdgpu_vm we do this update for */
	struct amdgpu_vm *vm;
	/* address where to copy page table entries from */
	uint64_t src;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
	/* Function which actually does the update */
	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
	/* indicate update pt or its shadow */
	bool shadow;
};
/* Helper to disable partial resident texture feature from a fence callback */
struct amdgpu_prt_cb {
	struct amdgpu_device *adev;
	struct dma_fence_cb cb;
};
/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	if (level == 0)
		/* For the root directory */
		return adev->vm_manager.max_pfn >>
			(adev->vm_manager.block_size *
			 adev->vm_manager.num_level);
	else if (level == adev->vm_manager.num_level)
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
	else
		/* Everything in between */
		return 1 << adev->vm_manager.block_size;
}
/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}
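/*
 * Example (illustrative only): with a 9-bit block size a leaf page table has
 * 1 << 9 = 512 entries, so amdgpu_vm_bo_size() returns
 * AMDGPU_GPU_PAGE_ALIGN(512 * 8) = 4096 bytes for that level.
 */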
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->root.bo;
	entry->tv.bo = &entry->robj->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}
/**
 * amdgpu_vm_validate_level - validate a single page table level
 *
 * @parent: parent page table level
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
				    int (*validate)(void *, struct amdgpu_bo *),
				    void *param)
{
	unsigned i;
	int r;

	if (!parent->entries)
		return 0;

	for (i = 0; i <= parent->last_entry_used; ++i) {
		struct amdgpu_vm_pt *entry = &parent->entries[i];

		if (!entry->bo)
			continue;

		r = validate(param, entry->bo);
		if (r)
			return r;

		/*
		 * Recurse into the sub directory. This is harmless because we
		 * have only a maximum of 5 layers.
		 */
		r = amdgpu_vm_validate_level(entry, validate, param);
		if (r)
			return r;
	}

	return 0;
}
/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	uint64_t num_evictions;

	/* We only need to validate the page tables
	 * if they aren't already valid.
	 */
	num_evictions = atomic64_read(&adev->num_evictions);
	if (num_evictions == vm->last_eviction_counter)
		return 0;

	return amdgpu_vm_validate_level(&vm->root, validate, param);
}
/**
 * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail
 *
 * @parent: parent page table level
 *
 * Move the PT BOs to the tail of the LRU.
 */
static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent)
{
	unsigned i;

	if (!parent->entries)
		return;

	for (i = 0; i <= parent->last_entry_used; ++i) {
		struct amdgpu_vm_pt *entry = &parent->entries[i];

		if (!entry->bo)
			continue;

		ttm_bo_move_to_lru_tail(&entry->bo->tbo);
		amdgpu_vm_move_level_in_lru(entry);
	}
}
/**
 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
 *
 * @adev: amdgpu device instance
 * @vm: vm providing the BOs
 *
 * Move the PT BOs to the tail of the LRU.
 */
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	amdgpu_vm_move_level_in_lru(&vm->root);
	spin_unlock(&glob->lru_lock);
}
/**
 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @saddr: start of the address range
 * @eaddr: end of the address range
 *
 * Make sure the page directories and page tables are allocated
 */
static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm,
				  struct amdgpu_vm_pt *parent,
				  uint64_t saddr, uint64_t eaddr,
				  unsigned level)
{
	unsigned shift = (adev->vm_manager.num_level - level) *
		adev->vm_manager.block_size;
	unsigned pt_idx, from, to;
	int r;

	if (!parent->entries) {
		unsigned num_entries = amdgpu_vm_num_entries(adev, level);

		parent->entries = drm_calloc_large(num_entries,
						   sizeof(struct amdgpu_vm_pt));
		if (!parent->entries)
			return -ENOMEM;
		memset(parent->entries, 0, sizeof(struct amdgpu_vm_pt));
	}

	from = saddr >> shift;
	to = eaddr >> shift;
	if (from >= amdgpu_vm_num_entries(adev, level) ||
	    to >= amdgpu_vm_num_entries(adev, level))
		return -EINVAL;

	if (to > parent->last_entry_used)
		parent->last_entry_used = to;

	++level;
	saddr = saddr & ((1 << shift) - 1);
	eaddr = eaddr & ((1 << shift) - 1);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
		struct reservation_object *resv = vm->root.bo->tbo.resv;
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
		struct amdgpu_bo *pt;

		if (!entry->bo) {
			r = amdgpu_bo_create(adev,
					     amdgpu_vm_bo_size(adev, level),
					     AMDGPU_GPU_PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM,
					     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
					     AMDGPU_GEM_CREATE_SHADOW |
					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
					     AMDGPU_GEM_CREATE_VRAM_CLEARED,
					     NULL, resv, &pt);
			if (r)
				return r;

			/* Keep a reference to the root directory to avoid
			 * freeing them up in the wrong order.
			 */
			pt->parent = amdgpu_bo_ref(vm->root.bo);

			entry->bo = pt;
			entry->addr = 0;
		}

		if (level < adev->vm_manager.num_level) {
			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
				((1 << shift) - 1);

			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
						   sub_eaddr, level);
			if (r)
				return r;
		}
	}

	return 0;
}
/**
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page tables are allocated.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size)
{
	uint64_t last_pfn;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	eaddr = saddr + size - 1;
	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
}
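/*
 * Hedged usage sketch for amdgpu_vm_alloc_pts() (example values, not driver
 * code): both the start address and the size must be GPU-page aligned or the
 * function returns -EINVAL.
 *
 *	uint64_t saddr = 1ULL << 20;			// 1 MiB, page aligned
 *	uint64_t size  = 16 * AMDGPU_GPU_PAGE_SIZE;	// 64 KiB worth of PTEs
 *	int r = amdgpu_vm_alloc_pts(adev, vm, saddr, size);
 *	if (r)
 *		return r;
 */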
/**
 * amdgpu_vm_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if GPU reset occurred since last use of the VMID.
 */
static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
				    struct amdgpu_vm_id *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}
/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct dma_fence *fence,
		      struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	struct amdgpu_vm_id *id, *idle;
	struct dma_fence **fences;
	unsigned i;
	int r = 0;

	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	mutex_lock(&id_mgr->lock);

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find a idle VMID to use, wait till one becomes available */
	if (&idle->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			r = -ENOMEM;
			goto error;
		}

		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
		dma_fence_put(&array->base);
		if (r)
			goto error;

		mutex_unlock(&id_mgr->lock);
		return 0;
	}
	kfree(fences);

	job->vm_needs_flush = false;
	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
		struct dma_fence *flushed;
		bool needs_flush = false;

		/* Check all the prerequisites to using this VMID */
		if (amdgpu_vm_had_gpu_reset(adev, id))
			continue;

		if (atomic64_read(&id->owner) != vm->client_id)
			continue;

		if (job->vm_pd_addr != id->pd_gpu_addr)
			continue;

		if (!id->last_flush ||
		    (id->last_flush->context != fence_context &&
		     !dma_fence_is_signaled(id->last_flush)))
			needs_flush = true;

		flushed = id->flushed_updates;
		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
			needs_flush = true;

		/* Concurrent flushes are only possible starting with Vega10 */
		if (adev->asic_type < CHIP_VEGA10 && needs_flush)
			continue;

		/* Good we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
		if (r)
			goto error;

		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
			dma_fence_put(id->flushed_updates);
			id->flushed_updates = dma_fence_get(updates);
		}

		if (needs_flush)
			goto needs_flush;
		else
			goto no_flush_needed;
	}

	/* Still no ID to use? Then use the idle one found earlier */
	id = idle;

	/* Remember this submission as user of the VMID */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
	if (r)
		goto error;

	id->pd_gpu_addr = job->vm_pd_addr;
	dma_fence_put(id->flushed_updates);
	id->flushed_updates = dma_fence_get(updates);
	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
	atomic64_set(&id->owner, vm->client_id);

needs_flush:
	job->vm_needs_flush = true;
	dma_fence_put(id->last_flush);
	id->last_flush = NULL;

no_flush_needed:
	list_move_tail(&id->list, &id_mgr->ids_lru);

	job->vm_id = id - id_mgr->ids;
	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}
static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	const struct amdgpu_ip_block *ip_block;

	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
		/* only compute rings */
		return false;

	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (!ip_block)
		return false;

	if (ip_block->version->major <= 7) {
		/* gfx7 has no workaround */
		return true;
	} else if (ip_block->version->major == 8) {
		if (adev->gfx.mec_fw_version >= 673)
			/* gfx8 is fixed in MEC firmware 673 */
			return false;
		else
			return true;
	}
	return false;
}
static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
	u64 addr = mc_addr;

	if (adev->gart.gart_funcs->adjust_mc_addr)
		addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr);

	return addr;
}
/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: job carrying the VMID and page directory address
 *
 * Emit a VM flush when it is necessary.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	bool vm_flush_needed = job->vm_needs_flush ||
		amdgpu_vm_ring_has_compute_vm_bug(ring);
	unsigned patch_offset = 0;
	int r;

	if (amdgpu_vm_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
	}

	if (!vm_flush_needed && !gds_switch_needed)
		return 0;

	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (ring->funcs->emit_pipeline_sync && !job->need_pipeline_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
		u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
		struct dma_fence *fence;

		trace_amdgpu_vm_flush(ring, job->vm_id, pd_addr);
		amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);

		r = amdgpu_fence_emit(ring, &fence);
		if (r)
			return r;

		mutex_lock(&id_mgr->lock);
		dma_fence_put(id->last_flush);
		id->last_flush = fence;
		mutex_unlock(&id_mgr->lock);
	}

	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
	}
	return 0;
}
/**
 * amdgpu_vm_reset_id - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub the VMID belongs to
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
			unsigned vmid)
{
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vm_id *id = &id_mgr->ids[vmid];

	atomic64_set(&id->owner, 0);
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
}
/**
 * amdgpu_vm_reset_all_ids - reset VMID to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset VMID to force flush on next use
 */
void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vm_reset_id(adev, i, j);
	}
}
/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}
/**
 * amdgpu_vm_do_set_ptes - helper to call the right asic function
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
				  uint64_t flags)
{
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

	if (count < 3) {
		amdgpu_vm_write_pte(params->adev, params->ib, pe,
				    addr | flags, count, incr);
	} else {
		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
				      count, incr, flags);
	}
}
/**
 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	uint64_t src = (params->src + (addr >> 12) * 8);

	trace_amdgpu_vm_copy_ptes(pe, src, count);

	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
}
/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size*/
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
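/*
 * Illustrative note (not driver code): the OR with the low address bits only
 * matters when the CPU page is larger than the 4K GPU page.  With 64K CPU
 * pages, for example, addr & ~PAGE_MASK keeps the 16 low bits and the final
 * mask to 0x...F000 then preserves which 4K GPU page inside the 64K CPU page
 * was meant, while still returning a 4K-aligned address.
 */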
/*
 * amdgpu_vm_update_level - update a single level in the hierarchy
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @parent: parent directory
 *
 * Makes sure all entries in @parent are up to date.
 * Returns 0 for success, error for failure.
 */
static int amdgpu_vm_update_level(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm,
				  struct amdgpu_vm_pt *parent,
				  unsigned level)
{
	struct amdgpu_bo *shadow;
	struct amdgpu_ring *ring;
	uint64_t pd_addr, shadow_addr;
	uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct dma_fence *fence = NULL;
	int r;

	if (!parent->entries)
		return 0;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += parent->last_entry_used * 6;

	pd_addr = amdgpu_bo_gpu_offset(parent->bo);

	shadow = parent->bo->shadow;
	if (shadow) {
		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
		if (r)
			return r;
		shadow_addr = amdgpu_bo_gpu_offset(shadow);
		ndw *= 2;
	} else {
		shadow_addr = 0;
	}

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.ib = &job->ibs[0];

	/* walk over the address space and update the directory */
	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
		struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		if (bo->shadow) {
			struct amdgpu_bo *pt_shadow = bo->shadow;

			r = amdgpu_ttm_bind(&pt_shadow->tbo,
					    &pt_shadow->tbo.mem);
			if (r)
				return r;
		}

		pt = amdgpu_bo_gpu_offset(bo);
		if (parent->entries[pt_idx].addr == pt)
			continue;

		parent->entries[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt) ||
		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {

			if (count) {
				uint64_t pt_addr =
					amdgpu_vm_adjust_mc_addr(adev, last_pt);

				if (shadow)
					amdgpu_vm_do_set_ptes(&params,
							      last_shadow,
							      pt_addr, count,
							      incr,
							      AMDGPU_PTE_VALID);

				amdgpu_vm_do_set_ptes(&params, last_pde,
						      pt_addr, count, incr,
						      AMDGPU_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_shadow = shadow_addr + pt_idx * 8;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count) {
		uint64_t pt_addr = amdgpu_vm_adjust_mc_addr(adev, last_pt);

		if (vm->root.bo->shadow)
			amdgpu_vm_do_set_ptes(&params, last_shadow, pt_addr,
					      count, incr, AMDGPU_PTE_VALID);

		amdgpu_vm_do_set_ptes(&params, last_pde, pt_addr,
				      count, incr, AMDGPU_PTE_VALID);
	}

	if (params.ib->length_dw == 0) {
		amdgpu_job_free(job);
	} else {
		amdgpu_ring_pad_ib(ring, params.ib);
		amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM);
		if (shadow)
			amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
					 AMDGPU_FENCE_OWNER_VM);

		WARN_ON(params.ib->length_dw > ndw);
		r = amdgpu_job_submit(job, ring, &vm->entity,
				      AMDGPU_FENCE_OWNER_VM, &fence);
		if (r)
			goto error_free;

		amdgpu_bo_fence(parent->bo, fence, true);
		dma_fence_put(vm->last_dir_update);
		vm->last_dir_update = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	/*
	 * Recurse into the subdirectories. This recursion is harmless because
	 * we only have a maximum of 5 layers.
	 */
	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];

		if (!entry->bo)
			continue;

		r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
		if (r)
			return r;
	}

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
/*
 * amdgpu_vm_update_directories - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Makes sure all directories are up to date.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm)
{
	return amdgpu_vm_update_level(adev, vm, &vm->root, 0);
}
/**
 * amdgpu_vm_get_pt - find the page table for an address
 *
 * @p: see amdgpu_pte_update_params definition
 * @addr: virtual address in question
 *
 * Find the page table BO for a virtual address, return NULL when none found.
 */
static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
					  uint64_t addr)
{
	struct amdgpu_vm_pt *entry = &p->vm->root;
	unsigned idx, level = p->adev->vm_manager.num_level;

	while (entry->entries) {
		idx = addr >> (p->adev->vm_manager.block_size * level--);
		idx %= amdgpu_bo_size(entry->bo) / 8;
		entry = &entry->entries[idx];
	}

	if (level)
		return NULL;

	return entry->bo;
}
/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @params: see amdgpu_pte_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 */
static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;
	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;

	uint64_t cur_pe_start, cur_nptes, cur_dst;
	uint64_t addr; /* next GPU address to be updated */
	struct amdgpu_bo *pt;
	unsigned nptes; /* next number of ptes to be updated */
	uint64_t next_pe_start;

	/* initialize the variables */
	addr = start;
	pt = amdgpu_vm_get_pt(params, addr);
	if (!pt) {
		pr_err("PT not found, aborting update_ptes\n");
		return;
	}

	if (params->shadow) {
		if (!pt->shadow)
			return;
		pt = pt->shadow;
	}
	if ((addr & ~mask) == (end & ~mask))
		nptes = end - addr;
	else
		nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);

	cur_pe_start = amdgpu_bo_gpu_offset(pt);
	cur_pe_start += (addr & mask) * 8;
	cur_nptes = nptes;
	cur_dst = dst;

	/* for next ptb */
	addr += nptes;
	dst += nptes * AMDGPU_GPU_PAGE_SIZE;

	/* walk over the address space and update the page tables */
	while (addr < end) {
		pt = amdgpu_vm_get_pt(params, addr);
		if (!pt) {
			pr_err("PT not found, aborting update_ptes\n");
			return;
		}

		if (params->shadow) {
			if (!pt->shadow)
				return;
			pt = pt->shadow;
		}

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);

		next_pe_start = amdgpu_bo_gpu_offset(pt);
		next_pe_start += (addr & mask) * 8;

		if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
		    ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
			/* The next ptb is consecutive to current ptb.
			 * Don't call the update function now.
			 * Will update two ptbs together in future.
			 */
			cur_nptes += nptes;
		} else {
			params->func(params, cur_pe_start, cur_dst, cur_nptes,
				     AMDGPU_GPU_PAGE_SIZE, flags);

			cur_pe_start = next_pe_start;
			cur_nptes = nptes;
			cur_dst = dst;
		}

		/* for next ptb */
		addr += nptes;
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

	params->func(params, cur_pe_start, cur_dst, cur_nptes,
		     AMDGPU_GPU_PAGE_SIZE, flags);
}
/*
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @params: see amdgpu_pte_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @dst: addr those PTEs should point to
 * @flags: hw mapping flags
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
				uint64_t start, uint64_t end,
				uint64_t dst, uint64_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */

	/* SI and newer are optimized for 64KB */
	uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
	uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;

	uint64_t frag_start = ALIGN(start, frag_align);
	uint64_t frag_end = end & ~(frag_align - 1);

	/* system pages are non-contiguous */
	if (params->src || !(flags & AMDGPU_PTE_VALID) ||
	    (frag_start >= frag_end)) {
		amdgpu_vm_update_ptes(params, start, end, dst, flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (start != frag_start) {
		amdgpu_vm_update_ptes(params, start, frag_start,
				      dst, flags);
		dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
	}

	/* handle the area in the middle */
	amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
			      flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != end) {
		dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
		amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
	}
}
/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @src: address where to copy page table entries from
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
					struct dma_fence *exclusive,
					uint64_t src,
					dma_addr_t *pages_addr,
					struct amdgpu_vm *vm,
					uint64_t start, uint64_t last,
					uint64_t flags, uint64_t addr,
					struct dma_fence **fence)
{
	struct amdgpu_ring *ring;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
	struct amdgpu_job *job;
	struct amdgpu_pte_update_params params;
	struct dma_fence *f = NULL;
	int r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.src = src;

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	nptes = last - start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;

	/* padding, etc. */
	ndw = 64;

	if (src) {
		/* only copy commands needed */
		ndw += ncmds * 7;

		params.func = amdgpu_vm_do_copy_ptes;

	} else if (pages_addr) {
		/* copy commands needed */
		ndw += ncmds * 7;

		/* and also PTEs */
		ndw += nptes * 2;

		params.func = amdgpu_vm_do_copy_ptes;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;

		params.func = amdgpu_vm_do_set_ptes;
	}

	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;

	params.ib = &job->ibs[0];

	if (!src && pages_addr) {
		uint64_t *pte;
		unsigned i;

		/* Put the PTEs at the end of the IB. */
		i = ndw - nptes * 2;
		pte = (uint64_t *)&(job->ibs->ptr[i]);
		params.src = job->ibs->gpu_addr + i * 4;

		for (i = 0; i < nptes; ++i) {
			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
						    AMDGPU_GPU_PAGE_SIZE);
			pte[i] |= flags;
		}
		addr = 0;
	}

	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
	if (r)
		goto error_free;

	r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
			     owner);
	if (r)
		goto error_free;

	r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
	if (r)
		goto error_free;

	params.shadow = true;
	amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
	params.shadow = false;
	amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);

	amdgpu_ring_pad_ib(ring, params.ib);
	WARN_ON(params.ib->length_dw > ndw);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->root.bo, f, true);
	if (fence) {
		dma_fence_put(*fence);
		*fence = dma_fence_get(f);
	}
	dma_fence_put(f);
	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @gtt_flags: flags as they are used for GTT
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @flags: HW flags for the mapping
 * @nodes: array of drm_mm_nodes with the MC addresses
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into an IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
				      struct dma_fence *exclusive,
				      uint64_t gtt_flags,
				      dma_addr_t *pages_addr,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
				      uint64_t flags,
				      struct drm_mm_node *nodes,
				      struct dma_fence **fence)
{
	uint64_t pfn, src = 0, start = mapping->start;
	int r;

	/* Normally bo_va->flags only contains the READABLE and WRITEABLE bits
	 * here, but just in case we filter the flags in the first place.
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	flags &= ~AMDGPU_PTE_EXECUTABLE;
	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	flags &= ~AMDGPU_PTE_MTYPE_MASK;
	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);

	if ((mapping->flags & AMDGPU_PTE_PRT) &&
	    (adev->asic_type >= CHIP_VEGA10)) {
		flags |= AMDGPU_PTE_PRT;
		flags &= ~AMDGPU_PTE_VALID;
	}

	trace_amdgpu_vm_bo_update(mapping);

	pfn = mapping->offset >> PAGE_SHIFT;
	if (nodes) {
		while (pfn >= nodes->size) {
			pfn -= nodes->size;
			++nodes;
		}
	}

	do {
		uint64_t max_entries;
		uint64_t addr, last;

		if (nodes) {
			addr = nodes->start << PAGE_SHIFT;
			max_entries = (nodes->size - pfn) *
				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
		} else {
			addr = 0;
			max_entries = S64_MAX;
		}

		if (pages_addr) {
			if (flags == gtt_flags)
				src = adev->gart.table_addr +
					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
			else
				max_entries = min(max_entries, 16ull * 1024ull);
			addr = 0;
		} else if (flags & AMDGPU_PTE_VALID) {
			addr += adev->vm_manager.vram_base_offset;
		}
		addr += pfn << PAGE_SHIFT;

		last = min((uint64_t)mapping->last, start + max_entries - 1);
		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
						src, pages_addr, vm,
						start, last, flags, addr,
						fence);
		if (r)
			return r;

		pfn += last - start + 1;
		if (nodes && nodes->size == pfn) {
			pfn = 0;
			++nodes;
		}
		start = last + 1;

	} while (unlikely(start != mapping->last + 1));

	return 0;
}
/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	uint64_t gtt_flags, flags;
	struct ttm_mem_reg *mem;
	struct drm_mm_node *nodes;
	struct dma_fence *exclusive;
	int r;

	if (clear || !bo_va->bo) {
		mem = NULL;
		nodes = NULL;
		exclusive = NULL;
	} else {
		struct ttm_dma_tt *ttm;

		mem = &bo_va->bo->tbo.mem;
		nodes = mem->mm_node;
		if (mem->mem_type == TTM_PL_TT) {
			ttm = container_of(bo_va->bo->tbo.ttm, struct
					   ttm_dma_tt, ttm);
			pages_addr = ttm->dma_address;
		}
		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
	}

	if (bo_va->bo) {
		flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
		gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
			     adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
			flags : 0;
	} else {
		flags = 0x0;
		gtt_flags = ~0x0;
	}

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
					       gtt_flags, pages_addr, vm,
					       mapping, flags, nodes,
					       &bo_va->last_pt_update);
		if (r)
			return r;
	}

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);

		list_for_each_entry(mapping, &bo_va->invalids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);
	if (clear)
		list_add(&bo_va->vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	return 0;
}
/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
	adev->gart.gart_funcs->set_prt(adev, enable);
	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}
/**
 * amdgpu_vm_prt_get - add a PRT user
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
	if (!adev->gart.gart_funcs->set_prt)
		return;

	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
		amdgpu_vm_update_prt_state(adev);
}
/**
 * amdgpu_vm_prt_put - drop a PRT user
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
		amdgpu_vm_update_prt_state(adev);
}
/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

	amdgpu_vm_prt_put(cb->adev);
	kfree(cb);
}
/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
				 struct dma_fence *fence)
{
	struct amdgpu_prt_cb *cb;

	if (!adev->gart.gart_funcs->set_prt)
		return;

	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		if (fence)
			dma_fence_wait(fence, false);

		amdgpu_vm_prt_put(adev);
	} else {
		cb->adev = adev;
		if (!fence || dma_fence_add_callback(fence, &cb->cb,
						     amdgpu_vm_prt_cb))
			amdgpu_vm_prt_cb(fence, &cb->cb);
	}
}
/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo_va_mapping *mapping,
				   struct dma_fence *fence)
{
	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_add_prt_cb(adev, fence);
	kfree(mapping);
}
/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct reservation_object *resv = vm->root.bo->tbo.resv;
	struct dma_fence *excl, **shared;
	unsigned i, shared_count;
	int r;

	r = reservation_object_get_fences_rcu(resv, &excl,
					      &shared_count, &shared);
	if (r) {
		/* Not enough memory to grab the fence list, as last resort
		 * block for all the fences to complete.
		 */
		reservation_object_wait_timeout_rcu(resv, true, false,
						    MAX_SCHEDULE_TIMEOUT);
		return;
	}

	/* Add a callback for each fence in the reservation object */
	amdgpu_vm_prt_get(adev);
	amdgpu_vm_add_prt_cb(adev, excl);

	for (i = 0; i < shared_count; ++i) {
		amdgpu_vm_prt_get(adev);
		amdgpu_vm_add_prt_cb(adev, shared[i]);
	}

	kfree(shared);
}
/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct dma_fence *f = NULL;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
						mapping->start, mapping->last,
						0, 0, &f);
		amdgpu_vm_free_mapping(adev, vm, mapping, f);
		if (r) {
			dma_fence_put(f);
			return r;
		}
	}

	if (fence && f) {
		dma_fence_put(*fence);
		*fence = f;
	} else {
		dma_fence_put(f);
	}

	return 0;
}
/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add the fence to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = amdgpu_vm_bo_update(adev, bo_va, true);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}
/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);
	INIT_LIST_HEAD(&bo_va->vm_status);

	if (bo)
		list_add_tail(&bo_va->bo_list, &bo->va);

	return bo_va;
}
/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	struct amdgpu_vm *vm = bo_va->vm;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
		return -EINVAL;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	if (tmp) {
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
			tmp->start, tmp->last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->list);
	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	return 0;
}
/**
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
 * mappings as we do so.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t offset,
			     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
		return -EINVAL;

	/* Allocate all the needed memory */
	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
	if (r) {
		kfree(mapping);
		return r;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	return 0;
}
/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	amdgpu_vm_it_remove(mapping, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);

	return 0;
}
/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, split them as appropriate.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			list_add(&before->list, &tmp->list);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += after->start - tmp->start;
			after->flags = tmp->flags;
			list_add(&after->list, &tmp->list);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;

		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(after);
	}

	return 0;
}
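/*
 * Illustrative example for the splitting above (example addresses only): if
 * a single mapping covers GPU pages 0x100-0x1ff and the cleared range is
 * 0x140-0x17f, the original mapping is moved to &vm->freed clamped to
 * 0x140-0x17f, while "before" keeps 0x100-0x13f and "after" keeps
 * 0x180-0x1ff with its offset advanced past the removed part.
 */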
/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object have to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}
/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	   Above that split equal between PD and PTs */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}
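/*
 * Worked example (illustrative only): for a 64GB VM size,
 * bits = ilog2(64) + 18 = 24, so the larger-VM branch yields a block size of
 * (24 + 3) / 2 = 13 bits, splitting those 24 bits roughly evenly between the
 * page directory and the page tables.
 */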
/**
 * amdgpu_vm_adjust_size - adjust vm size and block size
 *
 * @adev: amdgpu_device pointer
 * @vm_size: the default vm size if it's set auto
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
{
	/* adjust vm size first */
	if (amdgpu_vm_size == -1)
		adev->vm_manager.vm_size = vm_size;
	else
		adev->vm_manager.vm_size = amdgpu_vm_size;

	/* block size depends on vm size */
	if (amdgpu_vm_block_size == -1)
		adev->vm_manager.block_size =
			amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
	else
		adev->vm_manager.block_size = amdgpu_vm_block_size;

	DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
		 adev->vm_manager.vm_size, adev->vm_manager.block_size);
}
/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT(adev) * 8);
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int r;

	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entity for page table updates */
	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs);
	if (r)
		return r;

	vm->last_dir_update = NULL;

	r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			     AMDGPU_GEM_CREATE_SHADOW |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
			     AMDGPU_GEM_CREATE_VRAM_CLEARED,
			     NULL, NULL, &vm->root.bo);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(vm->root.bo, false);
	if (r)
		goto error_free_root;

	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
	amdgpu_bo_unreserve(vm->root.bo);

	return 0;

error_free_root:
	amdgpu_bo_unref(&vm->root.bo->shadow);
	amdgpu_bo_unref(&vm->root.bo);
	vm->root.bo = NULL;

error_free_sched_entity:
	amd_sched_entity_fini(&ring->sched, &vm->entity);

	return r;
}
/**
 * amdgpu_vm_free_levels - free PD/PT levels
 *
 * @level: PD/PT starting level to free
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
{
	unsigned i;

	if (level->bo) {
		amdgpu_bo_unref(&level->bo->shadow);
		amdgpu_bo_unref(&level->bo);
	}

	if (level->entries)
		for (i = 0; i <= level->last_entry_used; i++)
			amdgpu_vm_free_levels(&level->entries[i]);

	drm_free_large(level->entries);
}
/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;

	amd_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	amdgpu_vm_free_levels(&vm->root);
	dma_fence_put(vm->last_dir_update);
}
/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vm_reset_id(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	atomic64_set(&adev->vm_manager.client_counter, 0);
	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);
}
/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VM; ++j) {
			struct amdgpu_vm_id *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
		}
	}
}
*dev
, void *data
, struct drm_file
*filp
)
2328 union drm_amdgpu_vm
*args
= data
;
2330 switch (args
->in
.op
) {
2331 case AMDGPU_VM_OP_RESERVE_VMID
:
2332 case AMDGPU_VM_OP_UNRESERVE_VMID
: