2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
32 #include <drm/amdgpu_drm.h>
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
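 *
 * Illustrative note (an assumption for this comment, not taken from the
 * original code): on a multi-level layout with 9 bits per directory level
 * and 4 KiB pages, a GPU virtual address conceptually decomposes as
 *
 *   pde2_idx = (va >> 39) & 0x1ff;
 *   pde1_idx = (va >> 30) & 0x1ff;
 *   pde0_idx = (va >> 21) & 0x1ff;
 *   pte_idx  = (va >> 12) & 0x1ff;
 *
 * The exact split depends on vm_manager.block_size and num_level as set up
 * by amdgpu_vm_adjust_size() below.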
57 #define START(node) ((node)->start)
58 #define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)
/* Local structure. Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
	/* amdgpu device we do this update for */
	struct amdgpu_device *adev;
	/* optional amdgpu_vm we do this update for */
	struct amdgpu_vm *vm;
	/* address where to copy page table entries from */
	uint64_t src;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
	/* Function which actually does the update */
	void (*func)(struct amdgpu_pte_update_params *params,
		     struct amdgpu_bo *bo, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
	/* The next two are used during VM update by CPU
	 *  DMA addresses to use for mapping
	 *  Kernel pointer of PD/PT BO that needs to be updated
	 */
	dma_addr_t *pages_addr;
	void *kptr;
};
/* Helper to disable partial resident texture feature from a fence callback */
struct amdgpu_prt_cb {
	struct amdgpu_device *adev;
	struct dma_fence_cb cb;
};
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo *bo)
{
	INIT_LIST_HEAD(&base->bo_list);
	INIT_LIST_HEAD(&base->vm_status);

	list_add_tail(&base->bo_list, &bo->va);

	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
		return;

	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
		return;

	/*
	 * We checked all the prerequisites, but it looks like this per VM BO
	 * is currently evicted. Add the BO to the evicted list to make sure it
	 * is validated on next VM use to avoid a fault.
	 */
	list_move_tail(&base->vm_status, &vm->evicted);
}
126 * amdgpu_vm_level_shift - return the addr shift for each level
128 * @adev: amdgpu_device pointer
130 * Returns the number of bits the pfn needs to be right shifted for a level.
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = 0xff;

	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		shift = 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
		break;
	case AMDGPU_VM_PTB:
		shift = 0;
		break;
	default:
		dev_err(adev->dev, "the level%d isn't supported.\n", level);
	}

	return shift;
}
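/*
 * Illustrative example (assuming a 9-bit block_size, which is not given in
 * this file): the shifts work out to PTB = 0, PDB0 = 9, PDB1 = 18 and
 * PDB2 = 27 bits, i.e. each directory level covers 9 more bits of the page
 * frame number than the level below it.
 */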
155 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
157 * @adev: amdgpu_device pointer
159 * Calculate the number of entries in a page directory or page table.
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = amdgpu_vm_level_shift(adev,
					       adev->vm_manager.root_level);

	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;
	else
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
}
179 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
181 * @adev: amdgpu_device pointer
183 * Calculate the size of the BO for a page directory or page table in bytes.
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}
191 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
193 * @vm: vm providing the BOs
194 * @validated: head of validation list
195 * @entry: entry to add
197 * Add the page directory to the list of BOs to
198 * validate for command submission.
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->root.base.bo;
	entry->tv.bo = &entry->robj->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}
213 * amdgpu_vm_validate_pt_bos - validate the page table BOs
215 * @adev: amdgpu device pointer
216 * @vm: vm providing the BOs
217 * @validate: callback to do the validation
218 * @param: parameter for the validation callback
 * Validate the page table BOs on command submission if necessary.
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	struct amdgpu_vm_bo_base *bo_base, *tmp;
	int r = 0;

	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;

		r = validate(param, bo);
		if (r)
			break;

		spin_lock(&glob->lru_lock);
		ttm_bo_move_to_lru_tail(&bo->tbo);
		if (bo->shadow)
			ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
		spin_unlock(&glob->lru_lock);

		if (bo->tbo.type != ttm_bo_type_kernel) {
			spin_lock(&vm->moved_lock);
			list_move(&bo_base->vm_status, &vm->moved);
			spin_unlock(&vm->moved_lock);
		} else {
			list_move(&bo_base->vm_status, &vm->relocated);
		}
	}

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo_base, &vm->idle, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;

		ttm_bo_move_to_lru_tail(&bo->tbo);
		if (bo->shadow)
			ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
	}
	spin_unlock(&glob->lru_lock);

	return r;
}
271 * amdgpu_vm_ready - check VM is ready for updates
275 * Check if all VM PDs/PTs are ready for updates
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	return list_empty(&vm->evicted);
}
283 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
285 * @adev: amdgpu_device pointer
287 * @level: level this BO is at
289 * Root PD needs to be reserved when calling this.
291 static int amdgpu_vm_clear_bo(struct amdgpu_device
*adev
,
292 struct amdgpu_vm
*vm
, struct amdgpu_bo
*bo
,
293 unsigned level
, bool pte_support_ats
)
295 struct ttm_operation_ctx ctx
= { true, false };
296 struct dma_fence
*fence
= NULL
;
297 unsigned entries
, ats_entries
;
298 struct amdgpu_ring
*ring
;
299 struct amdgpu_job
*job
;
303 addr
= amdgpu_bo_gpu_offset(bo
);
304 entries
= amdgpu_bo_size(bo
) / 8;
306 if (pte_support_ats
) {
307 if (level
== adev
->vm_manager
.root_level
) {
308 ats_entries
= amdgpu_vm_level_shift(adev
, level
);
309 ats_entries
+= AMDGPU_GPU_PAGE_SHIFT
;
310 ats_entries
= AMDGPU_VA_HOLE_START
>> ats_entries
;
311 ats_entries
= min(ats_entries
, entries
);
312 entries
-= ats_entries
;
314 ats_entries
= entries
;
321 ring
= container_of(vm
->entity
.sched
, struct amdgpu_ring
, sched
);
323 r
= reservation_object_reserve_shared(bo
->tbo
.resv
);
327 r
= ttm_bo_validate(&bo
->tbo
, &bo
->placement
, &ctx
);
331 r
= amdgpu_job_alloc_with_ib(adev
, 64, &job
);
338 ats_value
= AMDGPU_PTE_DEFAULT_ATC
;
339 if (level
!= AMDGPU_VM_PTB
)
340 ats_value
|= AMDGPU_PDE_PTE
;
342 amdgpu_vm_set_pte_pde(adev
, &job
->ibs
[0], addr
, 0,
343 ats_entries
, 0, ats_value
);
344 addr
+= ats_entries
* 8;
348 amdgpu_vm_set_pte_pde(adev
, &job
->ibs
[0], addr
, 0,
351 amdgpu_ring_pad_ib(ring
, &job
->ibs
[0]);
353 WARN_ON(job
->ibs
[0].length_dw
> 64);
354 r
= amdgpu_sync_resv(adev
, &job
->sync
, bo
->tbo
.resv
,
355 AMDGPU_FENCE_OWNER_UNDEFINED
, false);
359 r
= amdgpu_job_submit(job
, ring
, &vm
->entity
,
360 AMDGPU_FENCE_OWNER_UNDEFINED
, &fence
);
364 amdgpu_bo_fence(bo
, fence
, true);
365 dma_fence_put(fence
);
368 return amdgpu_vm_clear_bo(adev
, vm
, bo
->shadow
,
369 level
, pte_support_ats
);
374 amdgpu_job_free(job
);
381 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
383 * @adev: amdgpu_device pointer
385 * @saddr: start of the address range
386 * @eaddr: end of the address range
388 * Make sure the page directories and page tables are allocated
390 static int amdgpu_vm_alloc_levels(struct amdgpu_device
*adev
,
391 struct amdgpu_vm
*vm
,
392 struct amdgpu_vm_pt
*parent
,
393 uint64_t saddr
, uint64_t eaddr
,
394 unsigned level
, bool ats
)
396 unsigned shift
= amdgpu_vm_level_shift(adev
, level
);
397 unsigned pt_idx
, from
, to
;
401 if (!parent
->entries
) {
402 unsigned num_entries
= amdgpu_vm_num_entries(adev
, level
);
404 parent
->entries
= kvmalloc_array(num_entries
,
405 sizeof(struct amdgpu_vm_pt
),
406 GFP_KERNEL
| __GFP_ZERO
);
407 if (!parent
->entries
)
409 memset(parent
->entries
, 0 , sizeof(struct amdgpu_vm_pt
));
412 from
= saddr
>> shift
;
414 if (from
>= amdgpu_vm_num_entries(adev
, level
) ||
415 to
>= amdgpu_vm_num_entries(adev
, level
))
419 saddr
= saddr
& ((1 << shift
) - 1);
420 eaddr
= eaddr
& ((1 << shift
) - 1);
422 flags
= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS
;
423 if (vm
->use_cpu_for_update
)
424 flags
|= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
;
426 flags
|= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS
|
427 AMDGPU_GEM_CREATE_SHADOW
);
429 /* walk over the address space and allocate the page tables */
430 for (pt_idx
= from
; pt_idx
<= to
; ++pt_idx
) {
431 struct reservation_object
*resv
= vm
->root
.base
.bo
->tbo
.resv
;
432 struct amdgpu_vm_pt
*entry
= &parent
->entries
[pt_idx
];
433 struct amdgpu_bo
*pt
;
435 if (!entry
->base
.bo
) {
436 struct amdgpu_bo_param bp
;
438 memset(&bp
, 0, sizeof(bp
));
439 bp
.size
= amdgpu_vm_bo_size(adev
, level
);
440 bp
.byte_align
= AMDGPU_GPU_PAGE_SIZE
;
441 bp
.domain
= AMDGPU_GEM_DOMAIN_VRAM
;
443 bp
.type
= ttm_bo_type_kernel
;
445 r
= amdgpu_bo_create(adev
, &bp
, &pt
);
449 r
= amdgpu_vm_clear_bo(adev
, vm
, pt
, level
, ats
);
451 amdgpu_bo_unref(&pt
->shadow
);
452 amdgpu_bo_unref(&pt
);
456 if (vm
->use_cpu_for_update
) {
457 r
= amdgpu_bo_kmap(pt
, NULL
);
459 amdgpu_bo_unref(&pt
->shadow
);
460 amdgpu_bo_unref(&pt
);
465 /* Keep a reference to the root directory to avoid
466 * freeing them up in the wrong order.
468 pt
->parent
= amdgpu_bo_ref(parent
->base
.bo
);
470 amdgpu_vm_bo_base_init(&entry
->base
, vm
, pt
);
471 list_move(&entry
->base
.vm_status
, &vm
->relocated
);
474 if (level
< AMDGPU_VM_PTB
) {
475 uint64_t sub_saddr
= (pt_idx
== from
) ? saddr
: 0;
476 uint64_t sub_eaddr
= (pt_idx
== to
) ? eaddr
:
478 r
= amdgpu_vm_alloc_levels(adev
, vm
, entry
, sub_saddr
,
479 sub_eaddr
, level
, ats
);
489 * amdgpu_vm_alloc_pts - Allocate page tables.
491 * @adev: amdgpu_device pointer
492 * @vm: VM to allocate page tables for
493 * @saddr: Start address which needs to be allocated
494 * @size: Size from start address we need.
496 * Make sure the page tables are allocated.
498 int amdgpu_vm_alloc_pts(struct amdgpu_device
*adev
,
499 struct amdgpu_vm
*vm
,
500 uint64_t saddr
, uint64_t size
)
505 /* validate the parameters */
506 if (saddr
& AMDGPU_GPU_PAGE_MASK
|| size
& AMDGPU_GPU_PAGE_MASK
)
509 eaddr
= saddr
+ size
- 1;
511 if (vm
->pte_support_ats
)
512 ats
= saddr
< AMDGPU_VA_HOLE_START
;
514 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
515 eaddr
/= AMDGPU_GPU_PAGE_SIZE
;
517 if (eaddr
>= adev
->vm_manager
.max_pfn
) {
518 dev_err(adev
->dev
, "va above limit (0x%08llX >= 0x%08llX)\n",
519 eaddr
, adev
->vm_manager
.max_pfn
);
523 return amdgpu_vm_alloc_levels(adev
, vm
, &vm
->root
, saddr
, eaddr
,
524 adev
->vm_manager
.root_level
, ats
);
528 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
530 * @adev: amdgpu_device pointer
532 void amdgpu_vm_check_compute_bug(struct amdgpu_device
*adev
)
534 const struct amdgpu_ip_block
*ip_block
;
535 bool has_compute_vm_bug
;
536 struct amdgpu_ring
*ring
;
539 has_compute_vm_bug
= false;
541 ip_block
= amdgpu_device_ip_get_ip_block(adev
, AMD_IP_BLOCK_TYPE_GFX
);
543 /* Compute has a VM bug for GFX version < 7.
544 Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
545 if (ip_block
->version
->major
<= 7)
546 has_compute_vm_bug
= true;
547 else if (ip_block
->version
->major
== 8)
548 if (adev
->gfx
.mec_fw_version
< 673)
549 has_compute_vm_bug
= true;
552 for (i
= 0; i
< adev
->num_rings
; i
++) {
553 ring
= adev
->rings
[i
];
554 if (ring
->funcs
->type
== AMDGPU_RING_TYPE_COMPUTE
)
555 /* only compute rings */
556 ring
->has_compute_vm_bug
= has_compute_vm_bug
;
558 ring
->has_compute_vm_bug
= false;
562 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring
*ring
,
563 struct amdgpu_job
*job
)
565 struct amdgpu_device
*adev
= ring
->adev
;
566 unsigned vmhub
= ring
->funcs
->vmhub
;
567 struct amdgpu_vmid_mgr
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
568 struct amdgpu_vmid
*id
;
569 bool gds_switch_needed
;
570 bool vm_flush_needed
= job
->vm_needs_flush
|| ring
->has_compute_vm_bug
;
574 id
= &id_mgr
->ids
[job
->vmid
];
575 gds_switch_needed
= ring
->funcs
->emit_gds_switch
&& (
576 id
->gds_base
!= job
->gds_base
||
577 id
->gds_size
!= job
->gds_size
||
578 id
->gws_base
!= job
->gws_base
||
579 id
->gws_size
!= job
->gws_size
||
580 id
->oa_base
!= job
->oa_base
||
581 id
->oa_size
!= job
->oa_size
);
583 if (amdgpu_vmid_had_gpu_reset(adev
, id
))
586 return vm_flush_needed
|| gds_switch_needed
;
589 static bool amdgpu_vm_is_large_bar(struct amdgpu_device
*adev
)
591 return (adev
->gmc
.real_vram_size
== adev
->gmc
.visible_vram_size
);
595 * amdgpu_vm_flush - hardware flush the vm
597 * @ring: ring to use for flush
598 * @vmid: vmid number to use
599 * @pd_addr: address of the page directory
601 * Emit a VM flush when it is necessary.
603 int amdgpu_vm_flush(struct amdgpu_ring
*ring
, struct amdgpu_job
*job
, bool need_pipe_sync
)
605 struct amdgpu_device
*adev
= ring
->adev
;
606 unsigned vmhub
= ring
->funcs
->vmhub
;
607 struct amdgpu_vmid_mgr
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
608 struct amdgpu_vmid
*id
= &id_mgr
->ids
[job
->vmid
];
609 bool gds_switch_needed
= ring
->funcs
->emit_gds_switch
&& (
610 id
->gds_base
!= job
->gds_base
||
611 id
->gds_size
!= job
->gds_size
||
612 id
->gws_base
!= job
->gws_base
||
613 id
->gws_size
!= job
->gws_size
||
614 id
->oa_base
!= job
->oa_base
||
615 id
->oa_size
!= job
->oa_size
);
616 bool vm_flush_needed
= job
->vm_needs_flush
;
617 bool pasid_mapping_needed
= id
->pasid
!= job
->pasid
||
618 !id
->pasid_mapping
||
619 !dma_fence_is_signaled(id
->pasid_mapping
);
620 struct dma_fence
*fence
= NULL
;
621 unsigned patch_offset
= 0;
624 if (amdgpu_vmid_had_gpu_reset(adev
, id
)) {
625 gds_switch_needed
= true;
626 vm_flush_needed
= true;
627 pasid_mapping_needed
= true;
630 gds_switch_needed
&= !!ring
->funcs
->emit_gds_switch
;
631 vm_flush_needed
&= !!ring
->funcs
->emit_vm_flush
;
632 pasid_mapping_needed
&= adev
->gmc
.gmc_funcs
->emit_pasid_mapping
&&
633 ring
->funcs
->emit_wreg
;
635 if (!vm_flush_needed
&& !gds_switch_needed
&& !need_pipe_sync
)
638 if (ring
->funcs
->init_cond_exec
)
639 patch_offset
= amdgpu_ring_init_cond_exec(ring
);
642 amdgpu_ring_emit_pipeline_sync(ring
);
644 if (vm_flush_needed
) {
645 trace_amdgpu_vm_flush(ring
, job
->vmid
, job
->vm_pd_addr
);
646 amdgpu_ring_emit_vm_flush(ring
, job
->vmid
, job
->vm_pd_addr
);
649 if (pasid_mapping_needed
)
650 amdgpu_gmc_emit_pasid_mapping(ring
, job
->vmid
, job
->pasid
);
652 if (vm_flush_needed
|| pasid_mapping_needed
) {
653 r
= amdgpu_fence_emit(ring
, &fence
, 0);
658 if (vm_flush_needed
) {
659 mutex_lock(&id_mgr
->lock
);
660 dma_fence_put(id
->last_flush
);
661 id
->last_flush
= dma_fence_get(fence
);
662 id
->current_gpu_reset_count
=
663 atomic_read(&adev
->gpu_reset_counter
);
664 mutex_unlock(&id_mgr
->lock
);
667 if (pasid_mapping_needed
) {
668 id
->pasid
= job
->pasid
;
669 dma_fence_put(id
->pasid_mapping
);
670 id
->pasid_mapping
= dma_fence_get(fence
);
672 dma_fence_put(fence
);
674 if (ring
->funcs
->emit_gds_switch
&& gds_switch_needed
) {
675 id
->gds_base
= job
->gds_base
;
676 id
->gds_size
= job
->gds_size
;
677 id
->gws_base
= job
->gws_base
;
678 id
->gws_size
= job
->gws_size
;
679 id
->oa_base
= job
->oa_base
;
680 id
->oa_size
= job
->oa_size
;
681 amdgpu_ring_emit_gds_switch(ring
, job
->vmid
, job
->gds_base
,
682 job
->gds_size
, job
->gws_base
,
683 job
->gws_size
, job
->oa_base
,
687 if (ring
->funcs
->patch_cond_exec
)
688 amdgpu_ring_patch_cond_exec(ring
, patch_offset
);
690 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
691 if (ring
->funcs
->emit_switch_buffer
) {
692 amdgpu_ring_emit_switch_buffer(ring
);
693 amdgpu_ring_emit_switch_buffer(ring
);
699 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
702 * @bo: requested buffer object
704 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm
706 * Returns the found bo_va or NULL if none is found
708 * Object has to be reserved!
710 struct amdgpu_bo_va
*amdgpu_vm_bo_find(struct amdgpu_vm
*vm
,
711 struct amdgpu_bo
*bo
)
713 struct amdgpu_bo_va
*bo_va
;
715 list_for_each_entry(bo_va
, &bo
->va
, base
.bo_list
) {
716 if (bo_va
->base
.vm
== vm
) {
724 * amdgpu_vm_do_set_ptes - helper to call the right asic function
726 * @params: see amdgpu_pte_update_params definition
727 * @bo: PD/PT to update
728 * @pe: addr of the page entry
729 * @addr: dst addr to write into pe
730 * @count: number of page entries to update
731 * @incr: increase next addr by incr bytes
732 * @flags: hw access flags
734 * Traces the parameters and calls the right asic functions
735 * to setup the page table using the DMA.
737 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params
*params
,
738 struct amdgpu_bo
*bo
,
739 uint64_t pe
, uint64_t addr
,
740 unsigned count
, uint32_t incr
,
743 pe
+= amdgpu_bo_gpu_offset(bo
);
744 trace_amdgpu_vm_set_ptes(pe
, addr
, count
, incr
, flags
);
747 amdgpu_vm_write_pte(params
->adev
, params
->ib
, pe
,
748 addr
| flags
, count
, incr
);
751 amdgpu_vm_set_pte_pde(params
->adev
, params
->ib
, pe
, addr
,
757 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
759 * @params: see amdgpu_pte_update_params definition
760 * @bo: PD/PT to update
761 * @pe: addr of the page entry
762 * @addr: dst addr to write into pe
763 * @count: number of page entries to update
764 * @incr: increase next addr by incr bytes
765 * @flags: hw access flags
767 * Traces the parameters and calls the DMA function to copy the PTEs.
769 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params
*params
,
770 struct amdgpu_bo
*bo
,
771 uint64_t pe
, uint64_t addr
,
772 unsigned count
, uint32_t incr
,
775 uint64_t src
= (params
->src
+ (addr
>> 12) * 8);
777 pe
+= amdgpu_bo_gpu_offset(bo
);
778 trace_amdgpu_vm_copy_ptes(pe
, src
, count
);
780 amdgpu_vm_copy_pte(params
->adev
, params
->ib
, pe
, src
, count
);
784 * amdgpu_vm_map_gart - Resolve gart mapping of addr
786 * @pages_addr: optional DMA address to use for lookup
787 * @addr: the unmapped addr
789 * Look up the physical address of the page that the pte resolves
790 * to and return the pointer for the page table entry.
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
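/*
 * Illustrative example (assumed values, not from the original code): with a
 * 64 KiB CPU PAGE_SIZE, addr = 0x15000 selects pages_addr[0x15000 >> 16] ==
 * pages_addr[1] and ORs in the 0x5000 offset inside that CPU page, so each
 * 4 KiB GPU page still resolves to the correct DMA address; the final mask
 * keeps the result 4 KiB aligned for use in a PTE.
 */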
808 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
810 * @params: see amdgpu_pte_update_params definition
811 * @bo: PD/PT to update
812 * @pe: kmap addr of the page entry
813 * @addr: dst addr to write into pe
814 * @count: number of page entries to update
815 * @incr: increase next addr by incr bytes
816 * @flags: hw access flags
818 * Write count number of PT/PD entries directly.
820 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params
*params
,
821 struct amdgpu_bo
*bo
,
822 uint64_t pe
, uint64_t addr
,
823 unsigned count
, uint32_t incr
,
829 pe
+= (unsigned long)amdgpu_bo_kptr(bo
);
831 trace_amdgpu_vm_set_ptes(pe
, addr
, count
, incr
, flags
);
833 for (i
= 0; i
< count
; i
++) {
834 value
= params
->pages_addr
?
835 amdgpu_vm_map_gart(params
->pages_addr
, addr
) :
837 amdgpu_gmc_set_pte_pde(params
->adev
, (void *)(uintptr_t)pe
,
843 static int amdgpu_vm_wait_pd(struct amdgpu_device
*adev
, struct amdgpu_vm
*vm
,
846 struct amdgpu_sync sync
;
849 amdgpu_sync_create(&sync
);
850 amdgpu_sync_resv(adev
, &sync
, vm
->root
.base
.bo
->tbo
.resv
, owner
, false);
851 r
= amdgpu_sync_wait(&sync
, true);
852 amdgpu_sync_free(&sync
);
858 * amdgpu_vm_update_pde - update a single level in the hierarchy
860 * @param: parameters for the update
862 * @parent: parent directory
863 * @entry: entry to update
865 * Makes sure the requested entry in parent is up to date.
867 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params
*params
,
868 struct amdgpu_vm
*vm
,
869 struct amdgpu_vm_pt
*parent
,
870 struct amdgpu_vm_pt
*entry
)
872 struct amdgpu_bo
*bo
= parent
->base
.bo
, *pbo
;
873 uint64_t pde
, pt
, flags
;
876 /* Don't update huge pages here */
880 for (level
= 0, pbo
= bo
->parent
; pbo
; ++level
)
883 level
+= params
->adev
->vm_manager
.root_level
;
884 pt
= amdgpu_bo_gpu_offset(entry
->base
.bo
);
885 flags
= AMDGPU_PTE_VALID
;
886 amdgpu_gmc_get_vm_pde(params
->adev
, level
, &pt
, &flags
);
887 pde
= (entry
- parent
->entries
) * 8;
889 params
->func(params
, bo
->shadow
, pde
, pt
, 1, 0, flags
);
890 params
->func(params
, bo
, pde
, pt
, 1, 0, flags
);
894 * amdgpu_vm_invalidate_level - mark all PD levels as invalid
898 * Mark all PD level as invalid after an error.
900 static void amdgpu_vm_invalidate_level(struct amdgpu_device
*adev
,
901 struct amdgpu_vm
*vm
,
902 struct amdgpu_vm_pt
*parent
,
905 unsigned pt_idx
, num_entries
;
908 * Recurse into the subdirectories. This recursion is harmless because
909 * we only have a maximum of 5 layers.
911 num_entries
= amdgpu_vm_num_entries(adev
, level
);
912 for (pt_idx
= 0; pt_idx
< num_entries
; ++pt_idx
) {
913 struct amdgpu_vm_pt
*entry
= &parent
->entries
[pt_idx
];
918 if (!entry
->base
.moved
)
919 list_move(&entry
->base
.vm_status
, &vm
->relocated
);
920 amdgpu_vm_invalidate_level(adev
, vm
, entry
, level
+ 1);
925 * amdgpu_vm_update_directories - make sure that all directories are valid
927 * @adev: amdgpu_device pointer
930 * Makes sure all directories are up to date.
931 * Returns 0 for success, error for failure.
933 int amdgpu_vm_update_directories(struct amdgpu_device
*adev
,
934 struct amdgpu_vm
*vm
)
936 struct amdgpu_pte_update_params params
;
937 struct amdgpu_job
*job
;
941 if (list_empty(&vm
->relocated
))
945 memset(¶ms
, 0, sizeof(params
));
948 if (vm
->use_cpu_for_update
) {
949 struct amdgpu_vm_bo_base
*bo_base
;
951 list_for_each_entry(bo_base
, &vm
->relocated
, vm_status
) {
952 r
= amdgpu_bo_kmap(bo_base
->bo
, NULL
);
957 r
= amdgpu_vm_wait_pd(adev
, vm
, AMDGPU_FENCE_OWNER_VM
);
961 params
.func
= amdgpu_vm_cpu_set_ptes
;
964 r
= amdgpu_job_alloc_with_ib(adev
, ndw
* 4, &job
);
968 params
.ib
= &job
->ibs
[0];
969 params
.func
= amdgpu_vm_do_set_ptes
;
972 while (!list_empty(&vm
->relocated
)) {
973 struct amdgpu_vm_bo_base
*bo_base
, *parent
;
974 struct amdgpu_vm_pt
*pt
, *entry
;
975 struct amdgpu_bo
*bo
;
977 bo_base
= list_first_entry(&vm
->relocated
,
978 struct amdgpu_vm_bo_base
,
980 bo_base
->moved
= false;
981 list_move(&bo_base
->vm_status
, &vm
->idle
);
983 bo
= bo_base
->bo
->parent
;
987 parent
= list_first_entry(&bo
->va
, struct amdgpu_vm_bo_base
,
989 pt
= container_of(parent
, struct amdgpu_vm_pt
, base
);
990 entry
= container_of(bo_base
, struct amdgpu_vm_pt
, base
);
992 amdgpu_vm_update_pde(¶ms
, vm
, pt
, entry
);
994 if (!vm
->use_cpu_for_update
&&
995 (ndw
- params
.ib
->length_dw
) < 32)
999 if (vm
->use_cpu_for_update
) {
1002 amdgpu_asic_flush_hdp(adev
, NULL
);
1003 } else if (params
.ib
->length_dw
== 0) {
1004 amdgpu_job_free(job
);
1006 struct amdgpu_bo
*root
= vm
->root
.base
.bo
;
1007 struct amdgpu_ring
*ring
;
1008 struct dma_fence
*fence
;
1010 ring
= container_of(vm
->entity
.sched
, struct amdgpu_ring
,
1013 amdgpu_ring_pad_ib(ring
, params
.ib
);
1014 amdgpu_sync_resv(adev
, &job
->sync
, root
->tbo
.resv
,
1015 AMDGPU_FENCE_OWNER_VM
, false);
1016 WARN_ON(params
.ib
->length_dw
> ndw
);
1017 r
= amdgpu_job_submit(job
, ring
, &vm
->entity
,
1018 AMDGPU_FENCE_OWNER_VM
, &fence
);
1022 amdgpu_bo_fence(root
, fence
, true);
1023 dma_fence_put(vm
->last_update
);
1024 vm
->last_update
= fence
;
1027 if (!list_empty(&vm
->relocated
))
1033 amdgpu_vm_invalidate_level(adev
, vm
, &vm
->root
,
1034 adev
->vm_manager
.root_level
);
1035 amdgpu_job_free(job
);
1040 * amdgpu_vm_find_entry - find the entry for an address
1042 * @p: see amdgpu_pte_update_params definition
1043 * @addr: virtual address in question
1044 * @entry: resulting entry or NULL
1045 * @parent: parent entry
 * Find the vm_pt entry and its parent for the given address.
1049 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params
*p
, uint64_t addr
,
1050 struct amdgpu_vm_pt
**entry
,
1051 struct amdgpu_vm_pt
**parent
)
1053 unsigned level
= p
->adev
->vm_manager
.root_level
;
1056 *entry
= &p
->vm
->root
;
1057 while ((*entry
)->entries
) {
1058 unsigned shift
= amdgpu_vm_level_shift(p
->adev
, level
++);
1061 *entry
= &(*entry
)->entries
[addr
>> shift
];
1062 addr
&= (1ULL << shift
) - 1;
1065 if (level
!= AMDGPU_VM_PTB
)
1070 * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1072 * @p: see amdgpu_pte_update_params definition
1073 * @entry: vm_pt entry to check
1074 * @parent: parent entry
1075 * @nptes: number of PTEs updated with this operation
1076 * @dst: destination address where the PTEs should point to
 * @flags: access flags for the PTEs
1079 * Check if we can update the PD with a huge page.
1081 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params
*p
,
1082 struct amdgpu_vm_pt
*entry
,
1083 struct amdgpu_vm_pt
*parent
,
1084 unsigned nptes
, uint64_t dst
,
1089 /* In the case of a mixed PT the PDE must point to it*/
1090 if (p
->adev
->asic_type
>= CHIP_VEGA10
&& !p
->src
&&
1091 nptes
== AMDGPU_VM_PTE_COUNT(p
->adev
)) {
1092 /* Set the huge page flag to stop scanning at this PDE */
1093 flags
|= AMDGPU_PDE_PTE
;
1096 if (!(flags
& AMDGPU_PDE_PTE
)) {
1098 /* Add the entry to the relocated list to update it. */
1099 entry
->huge
= false;
1100 list_move(&entry
->base
.vm_status
, &p
->vm
->relocated
);
1106 amdgpu_gmc_get_vm_pde(p
->adev
, AMDGPU_VM_PDB0
, &dst
, &flags
);
1108 pde
= (entry
- parent
->entries
) * 8;
1109 if (parent
->base
.bo
->shadow
)
1110 p
->func(p
, parent
->base
.bo
->shadow
, pde
, dst
, 1, 0, flags
);
1111 p
->func(p
, parent
->base
.bo
, pde
, dst
, 1, 0, flags
);
1115 * amdgpu_vm_update_ptes - make sure that page tables are valid
1117 * @params: see amdgpu_pte_update_params definition
1119 * @start: start of GPU address range
1120 * @end: end of GPU address range
1121 * @dst: destination address to map to, the next dst inside the function
1122 * @flags: mapping flags
1124 * Update the page tables in the range @start - @end.
1125 * Returns 0 for success, -EINVAL for failure.
1127 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params
*params
,
1128 uint64_t start
, uint64_t end
,
1129 uint64_t dst
, uint64_t flags
)
1131 struct amdgpu_device
*adev
= params
->adev
;
1132 const uint64_t mask
= AMDGPU_VM_PTE_COUNT(adev
) - 1;
1134 uint64_t addr
, pe_start
;
1135 struct amdgpu_bo
*pt
;
1138 /* walk over the address space and update the page tables */
1139 for (addr
= start
; addr
< end
; addr
+= nptes
,
1140 dst
+= nptes
* AMDGPU_GPU_PAGE_SIZE
) {
1141 struct amdgpu_vm_pt
*entry
, *parent
;
1143 amdgpu_vm_get_entry(params
, addr
, &entry
, &parent
);
1147 if ((addr
& ~mask
) == (end
& ~mask
))
1150 nptes
= AMDGPU_VM_PTE_COUNT(adev
) - (addr
& mask
);
1152 amdgpu_vm_handle_huge_pages(params
, entry
, parent
,
1154 /* We don't need to update PTEs for huge pages */
1158 pt
= entry
->base
.bo
;
1159 pe_start
= (addr
& mask
) * 8;
1161 params
->func(params
, pt
->shadow
, pe_start
, dst
, nptes
,
1162 AMDGPU_GPU_PAGE_SIZE
, flags
);
1163 params
->func(params
, pt
, pe_start
, dst
, nptes
,
1164 AMDGPU_GPU_PAGE_SIZE
, flags
);
1171 * amdgpu_vm_frag_ptes - add fragment information to PTEs
1173 * @params: see amdgpu_pte_update_params definition
1175 * @start: first PTE to handle
1176 * @end: last PTE to handle
1177 * @dst: addr those PTEs should point to
1178 * @flags: hw mapping flags
1179 * Returns 0 for success, -EINVAL for failure.
1181 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params
*params
,
1182 uint64_t start
, uint64_t end
,
1183 uint64_t dst
, uint64_t flags
)
1186 * The MC L1 TLB supports variable sized pages, based on a fragment
1187 * field in the PTE. When this field is set to a non-zero value, page
1188 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1189 * flags are considered valid for all PTEs within the fragment range
1190 * and corresponding mappings are assumed to be physically contiguous.
1192 * The L1 TLB can store a single PTE for the whole fragment,
1193 * significantly increasing the space available for translation
1194 * caching. This leads to large improvements in throughput when the
1195 * TLB is under pressure.
1197 * The L2 TLB distributes small and large fragments into two
1198 * asymmetric partitions. The large fragment cache is significantly
1199 * larger. Thus, we try to use large fragments wherever possible.
1200 * Userspace can support this by aligning virtual base address and
1201 * allocation size to the fragment size.
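 *
 * Illustrative example (assumed numbers, not from the original code): for
 * start = 0x200 and end = 0x600 (GPU page numbers) the code below computes
 * frag = min(ffs(0x200) - 1, fls64(0x400) - 1) = min(9, 10) = 9. With a
 * fragment_size (max_frag) of 9 the whole range is then written in a single
 * amdgpu_vm_update_ptes() call with AMDGPU_PTE_FRAG(9), i.e. the hardware
 * can treat it as 2 MiB fragments instead of individual 4 KiB pages.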
1203 unsigned max_frag
= params
->adev
->vm_manager
.fragment_size
;
	/* system pages are not contiguous */
1207 if (params
->src
|| !(flags
& AMDGPU_PTE_VALID
))
1208 return amdgpu_vm_update_ptes(params
, start
, end
, dst
, flags
);
1210 while (start
!= end
) {
1211 uint64_t frag_flags
, frag_end
;
1214 /* This intentionally wraps around if no bit is set */
1215 frag
= min((unsigned)ffs(start
) - 1,
1216 (unsigned)fls64(end
- start
) - 1);
1217 if (frag
>= max_frag
) {
1218 frag_flags
= AMDGPU_PTE_FRAG(max_frag
);
1219 frag_end
= end
& ~((1ULL << max_frag
) - 1);
1221 frag_flags
= AMDGPU_PTE_FRAG(frag
);
1222 frag_end
= start
+ (1 << frag
);
1225 r
= amdgpu_vm_update_ptes(params
, start
, frag_end
, dst
,
1226 flags
| frag_flags
);
1230 dst
+= (frag_end
- start
) * AMDGPU_GPU_PAGE_SIZE
;
1238 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1240 * @adev: amdgpu_device pointer
1241 * @exclusive: fence we need to sync to
1242 * @pages_addr: DMA addresses to use for mapping
1244 * @start: start of mapped range
1245 * @last: last mapped entry
1246 * @flags: flags for the entries
1247 * @addr: addr to set the area to
1248 * @fence: optional resulting fence
1250 * Fill in the page table entries between @start and @last.
1251 * Returns 0 for success, -EINVAL for failure.
1253 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device
*adev
,
1254 struct dma_fence
*exclusive
,
1255 dma_addr_t
*pages_addr
,
1256 struct amdgpu_vm
*vm
,
1257 uint64_t start
, uint64_t last
,
1258 uint64_t flags
, uint64_t addr
,
1259 struct dma_fence
**fence
)
1261 struct amdgpu_ring
*ring
;
1262 void *owner
= AMDGPU_FENCE_OWNER_VM
;
1263 unsigned nptes
, ncmds
, ndw
;
1264 struct amdgpu_job
*job
;
1265 struct amdgpu_pte_update_params params
;
1266 struct dma_fence
*f
= NULL
;
1269 memset(¶ms
, 0, sizeof(params
));
1273 /* sync to everything on unmapping */
1274 if (!(flags
& AMDGPU_PTE_VALID
))
1275 owner
= AMDGPU_FENCE_OWNER_UNDEFINED
;
1277 if (vm
->use_cpu_for_update
) {
		/* params.src is used as a flag to indicate system memory */
1282 /* Wait for PT BOs to be free. PTs share the same resv. object
1285 r
= amdgpu_vm_wait_pd(adev
, vm
, owner
);
1289 params
.func
= amdgpu_vm_cpu_set_ptes
;
1290 params
.pages_addr
= pages_addr
;
1291 return amdgpu_vm_frag_ptes(¶ms
, start
, last
+ 1,
1295 ring
= container_of(vm
->entity
.sched
, struct amdgpu_ring
, sched
);
1297 nptes
= last
- start
+ 1;
1300 * reserve space for two commands every (1 << BLOCK_SIZE)
1301 * entries or 2k dwords (whatever is smaller)
1303 * The second command is for the shadow pagetables.
1305 if (vm
->root
.base
.bo
->shadow
)
1306 ncmds
= ((nptes
>> min(adev
->vm_manager
.block_size
, 11u)) + 1) * 2;
1308 ncmds
= ((nptes
>> min(adev
->vm_manager
.block_size
, 11u)) + 1);
1314 /* copy commands needed */
1315 ndw
+= ncmds
* adev
->vm_manager
.vm_pte_funcs
->copy_pte_num_dw
;
1320 params
.func
= amdgpu_vm_do_copy_ptes
;
1323 /* set page commands needed */
1326 /* extra commands for begin/end fragments */
1327 ndw
+= 2 * 10 * adev
->vm_manager
.fragment_size
;
1329 params
.func
= amdgpu_vm_do_set_ptes
;
1332 r
= amdgpu_job_alloc_with_ib(adev
, ndw
* 4, &job
);
1336 params
.ib
= &job
->ibs
[0];
1342 /* Put the PTEs at the end of the IB. */
1343 i
= ndw
- nptes
* 2;
1344 pte
= (uint64_t *)&(job
->ibs
->ptr
[i
]);
1345 params
.src
= job
->ibs
->gpu_addr
+ i
* 4;
1347 for (i
= 0; i
< nptes
; ++i
) {
1348 pte
[i
] = amdgpu_vm_map_gart(pages_addr
, addr
+ i
*
1349 AMDGPU_GPU_PAGE_SIZE
);
1355 r
= amdgpu_sync_fence(adev
, &job
->sync
, exclusive
, false);
1359 r
= amdgpu_sync_resv(adev
, &job
->sync
, vm
->root
.base
.bo
->tbo
.resv
,
1364 r
= reservation_object_reserve_shared(vm
->root
.base
.bo
->tbo
.resv
);
1368 r
= amdgpu_vm_frag_ptes(¶ms
, start
, last
+ 1, addr
, flags
);
1372 amdgpu_ring_pad_ib(ring
, params
.ib
);
1373 WARN_ON(params
.ib
->length_dw
> ndw
);
1374 r
= amdgpu_job_submit(job
, ring
, &vm
->entity
,
1375 AMDGPU_FENCE_OWNER_VM
, &f
);
1379 amdgpu_bo_fence(vm
->root
.base
.bo
, f
, true);
1380 dma_fence_put(*fence
);
1385 amdgpu_job_free(job
);
1390 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1392 * @adev: amdgpu_device pointer
1393 * @exclusive: fence we need to sync to
1394 * @pages_addr: DMA addresses to use for mapping
1396 * @mapping: mapped range and flags to use for the update
1397 * @flags: HW flags for the mapping
1398 * @nodes: array of drm_mm_nodes with the MC addresses
1399 * @fence: optional resulting fence
1401 * Split the mapping into smaller chunks so that each update fits
1403 * Returns 0 for success, -EINVAL for failure.
1405 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device
*adev
,
1406 struct dma_fence
*exclusive
,
1407 dma_addr_t
*pages_addr
,
1408 struct amdgpu_vm
*vm
,
1409 struct amdgpu_bo_va_mapping
*mapping
,
1411 struct drm_mm_node
*nodes
,
1412 struct dma_fence
**fence
)
1414 unsigned min_linear_pages
= 1 << adev
->vm_manager
.fragment_size
;
1415 uint64_t pfn
, start
= mapping
->start
;
	/* Normally bo_va->flags only contains the READABLE and WRITEABLE bits
	 * here, but just in case we filter the flags first.
1421 if (!(mapping
->flags
& AMDGPU_PTE_READABLE
))
1422 flags
&= ~AMDGPU_PTE_READABLE
;
1423 if (!(mapping
->flags
& AMDGPU_PTE_WRITEABLE
))
1424 flags
&= ~AMDGPU_PTE_WRITEABLE
;
1426 flags
&= ~AMDGPU_PTE_EXECUTABLE
;
1427 flags
|= mapping
->flags
& AMDGPU_PTE_EXECUTABLE
;
1429 flags
&= ~AMDGPU_PTE_MTYPE_MASK
;
1430 flags
|= (mapping
->flags
& AMDGPU_PTE_MTYPE_MASK
);
1432 if ((mapping
->flags
& AMDGPU_PTE_PRT
) &&
1433 (adev
->asic_type
>= CHIP_VEGA10
)) {
1434 flags
|= AMDGPU_PTE_PRT
;
1435 flags
&= ~AMDGPU_PTE_VALID
;
1438 trace_amdgpu_vm_bo_update(mapping
);
1440 pfn
= mapping
->offset
>> PAGE_SHIFT
;
1442 while (pfn
>= nodes
->size
) {
1449 dma_addr_t
*dma_addr
= NULL
;
1450 uint64_t max_entries
;
1451 uint64_t addr
, last
;
1454 addr
= nodes
->start
<< PAGE_SHIFT
;
1455 max_entries
= (nodes
->size
- pfn
) *
1456 (PAGE_SIZE
/ AMDGPU_GPU_PAGE_SIZE
);
1459 max_entries
= S64_MAX
;
1465 max_entries
= min(max_entries
, 16ull * 1024ull);
1466 for (count
= 1; count
< max_entries
; ++count
) {
1467 uint64_t idx
= pfn
+ count
;
1469 if (pages_addr
[idx
] !=
1470 (pages_addr
[idx
- 1] + PAGE_SIZE
))
1474 if (count
< min_linear_pages
) {
1475 addr
= pfn
<< PAGE_SHIFT
;
1476 dma_addr
= pages_addr
;
1478 addr
= pages_addr
[pfn
];
1479 max_entries
= count
;
1482 } else if (flags
& AMDGPU_PTE_VALID
) {
1483 addr
+= adev
->vm_manager
.vram_base_offset
;
1484 addr
+= pfn
<< PAGE_SHIFT
;
1487 last
= min((uint64_t)mapping
->last
, start
+ max_entries
- 1);
1488 r
= amdgpu_vm_bo_update_mapping(adev
, exclusive
, dma_addr
, vm
,
1489 start
, last
, flags
, addr
,
1494 pfn
+= last
- start
+ 1;
1495 if (nodes
&& nodes
->size
== pfn
) {
1501 } while (unlikely(start
!= mapping
->last
+ 1));
1507 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1509 * @adev: amdgpu_device pointer
1510 * @bo_va: requested BO and VM object
1511 * @clear: if true clear the entries
1513 * Fill in the page table entries for @bo_va.
1514 * Returns 0 for success, -EINVAL for failure.
1516 int amdgpu_vm_bo_update(struct amdgpu_device
*adev
,
1517 struct amdgpu_bo_va
*bo_va
,
1520 struct amdgpu_bo
*bo
= bo_va
->base
.bo
;
1521 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
1522 struct amdgpu_bo_va_mapping
*mapping
;
1523 dma_addr_t
*pages_addr
= NULL
;
1524 struct ttm_mem_reg
*mem
;
1525 struct drm_mm_node
*nodes
;
1526 struct dma_fence
*exclusive
, **last_update
;
1530 if (clear
|| !bo_va
->base
.bo
) {
1535 struct ttm_dma_tt
*ttm
;
1537 mem
= &bo_va
->base
.bo
->tbo
.mem
;
1538 nodes
= mem
->mm_node
;
1539 if (mem
->mem_type
== TTM_PL_TT
) {
1540 ttm
= container_of(bo_va
->base
.bo
->tbo
.ttm
,
1541 struct ttm_dma_tt
, ttm
);
1542 pages_addr
= ttm
->dma_address
;
1544 exclusive
= reservation_object_get_excl(bo
->tbo
.resv
);
1548 flags
= amdgpu_ttm_tt_pte_flags(adev
, bo
->tbo
.ttm
, mem
);
1552 if (clear
|| (bo
&& bo
->tbo
.resv
== vm
->root
.base
.bo
->tbo
.resv
))
1553 last_update
= &vm
->last_update
;
1555 last_update
= &bo_va
->last_pt_update
;
1557 if (!clear
&& bo_va
->base
.moved
) {
1558 bo_va
->base
.moved
= false;
1559 list_splice_init(&bo_va
->valids
, &bo_va
->invalids
);
1561 } else if (bo_va
->cleared
!= clear
) {
1562 list_splice_init(&bo_va
->valids
, &bo_va
->invalids
);
1565 list_for_each_entry(mapping
, &bo_va
->invalids
, list
) {
1566 r
= amdgpu_vm_bo_split_mapping(adev
, exclusive
, pages_addr
, vm
,
1567 mapping
, flags
, nodes
,
1573 if (vm
->use_cpu_for_update
) {
1576 amdgpu_asic_flush_hdp(adev
, NULL
);
1579 spin_lock(&vm
->moved_lock
);
1580 list_del_init(&bo_va
->base
.vm_status
);
1581 spin_unlock(&vm
->moved_lock
);
1583 /* If the BO is not in its preferred location add it back to
1584 * the evicted list so that it gets validated again on the
1585 * next command submission.
1587 if (bo
&& bo
->tbo
.resv
== vm
->root
.base
.bo
->tbo
.resv
) {
1588 uint32_t mem_type
= bo
->tbo
.mem
.mem_type
;
1590 if (!(bo
->preferred_domains
& amdgpu_mem_type_to_domain(mem_type
)))
1591 list_add_tail(&bo_va
->base
.vm_status
, &vm
->evicted
);
1593 list_add(&bo_va
->base
.vm_status
, &vm
->idle
);
1596 list_splice_init(&bo_va
->invalids
, &bo_va
->valids
);
1597 bo_va
->cleared
= clear
;
1599 if (trace_amdgpu_vm_bo_mapping_enabled()) {
1600 list_for_each_entry(mapping
, &bo_va
->valids
, list
)
1601 trace_amdgpu_vm_bo_mapping(mapping
);
1608 * amdgpu_vm_update_prt_state - update the global PRT state
1610 static void amdgpu_vm_update_prt_state(struct amdgpu_device
*adev
)
1612 unsigned long flags
;
1615 spin_lock_irqsave(&adev
->vm_manager
.prt_lock
, flags
);
1616 enable
= !!atomic_read(&adev
->vm_manager
.num_prt_users
);
1617 adev
->gmc
.gmc_funcs
->set_prt(adev
, enable
);
1618 spin_unlock_irqrestore(&adev
->vm_manager
.prt_lock
, flags
);
1622 * amdgpu_vm_prt_get - add a PRT user
1624 static void amdgpu_vm_prt_get(struct amdgpu_device
*adev
)
1626 if (!adev
->gmc
.gmc_funcs
->set_prt
)
1629 if (atomic_inc_return(&adev
->vm_manager
.num_prt_users
) == 1)
1630 amdgpu_vm_update_prt_state(adev
);
1634 * amdgpu_vm_prt_put - drop a PRT user
1636 static void amdgpu_vm_prt_put(struct amdgpu_device
*adev
)
1638 if (atomic_dec_return(&adev
->vm_manager
.num_prt_users
) == 0)
1639 amdgpu_vm_update_prt_state(adev
);
1643 * amdgpu_vm_prt_cb - callback for updating the PRT status
1645 static void amdgpu_vm_prt_cb(struct dma_fence
*fence
, struct dma_fence_cb
*_cb
)
1647 struct amdgpu_prt_cb
*cb
= container_of(_cb
, struct amdgpu_prt_cb
, cb
);
1649 amdgpu_vm_prt_put(cb
->adev
);
1654 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1656 static void amdgpu_vm_add_prt_cb(struct amdgpu_device
*adev
,
1657 struct dma_fence
*fence
)
1659 struct amdgpu_prt_cb
*cb
;
1661 if (!adev
->gmc
.gmc_funcs
->set_prt
)
1664 cb
= kmalloc(sizeof(struct amdgpu_prt_cb
), GFP_KERNEL
);
1666 /* Last resort when we are OOM */
1668 dma_fence_wait(fence
, false);
1670 amdgpu_vm_prt_put(adev
);
1673 if (!fence
|| dma_fence_add_callback(fence
, &cb
->cb
,
1675 amdgpu_vm_prt_cb(fence
, &cb
->cb
);
1680 * amdgpu_vm_free_mapping - free a mapping
1682 * @adev: amdgpu_device pointer
1684 * @mapping: mapping to be freed
1685 * @fence: fence of the unmap operation
1687 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1689 static void amdgpu_vm_free_mapping(struct amdgpu_device
*adev
,
1690 struct amdgpu_vm
*vm
,
1691 struct amdgpu_bo_va_mapping
*mapping
,
1692 struct dma_fence
*fence
)
1694 if (mapping
->flags
& AMDGPU_PTE_PRT
)
1695 amdgpu_vm_add_prt_cb(adev
, fence
);
1700 * amdgpu_vm_prt_fini - finish all prt mappings
1702 * @adev: amdgpu_device pointer
1705 * Register a cleanup callback to disable PRT support after VM dies.
1707 static void amdgpu_vm_prt_fini(struct amdgpu_device
*adev
, struct amdgpu_vm
*vm
)
1709 struct reservation_object
*resv
= vm
->root
.base
.bo
->tbo
.resv
;
1710 struct dma_fence
*excl
, **shared
;
1711 unsigned i
, shared_count
;
1714 r
= reservation_object_get_fences_rcu(resv
, &excl
,
1715 &shared_count
, &shared
);
1717 /* Not enough memory to grab the fence list, as last resort
1718 * block for all the fences to complete.
1720 reservation_object_wait_timeout_rcu(resv
, true, false,
1721 MAX_SCHEDULE_TIMEOUT
);
1725 /* Add a callback for each fence in the reservation object */
1726 amdgpu_vm_prt_get(adev
);
1727 amdgpu_vm_add_prt_cb(adev
, excl
);
1729 for (i
= 0; i
< shared_count
; ++i
) {
1730 amdgpu_vm_prt_get(adev
);
1731 amdgpu_vm_add_prt_cb(adev
, shared
[i
]);
1738 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1740 * @adev: amdgpu_device pointer
1742 * @fence: optional resulting fence (unchanged if no work needed to be done
1743 * or if an error occurred)
1745 * Make sure all freed BOs are cleared in the PT.
1746 * Returns 0 for success.
1748 * PTs have to be reserved and mutex must be locked!
1750 int amdgpu_vm_clear_freed(struct amdgpu_device
*adev
,
1751 struct amdgpu_vm
*vm
,
1752 struct dma_fence
**fence
)
1754 struct amdgpu_bo_va_mapping
*mapping
;
1755 uint64_t init_pte_value
= 0;
1756 struct dma_fence
*f
= NULL
;
1759 while (!list_empty(&vm
->freed
)) {
1760 mapping
= list_first_entry(&vm
->freed
,
1761 struct amdgpu_bo_va_mapping
, list
);
1762 list_del(&mapping
->list
);
1764 if (vm
->pte_support_ats
&& mapping
->start
< AMDGPU_VA_HOLE_START
)
1765 init_pte_value
= AMDGPU_PTE_DEFAULT_ATC
;
1767 r
= amdgpu_vm_bo_update_mapping(adev
, NULL
, NULL
, vm
,
1768 mapping
->start
, mapping
->last
,
1769 init_pte_value
, 0, &f
);
1770 amdgpu_vm_free_mapping(adev
, vm
, mapping
, f
);
1778 dma_fence_put(*fence
);
1789 * amdgpu_vm_handle_moved - handle moved BOs in the PT
1791 * @adev: amdgpu_device pointer
1793 * @sync: sync object to add fences to
1795 * Make sure all BOs which are moved are updated in the PTs.
1796 * Returns 0 for success.
1798 * PTs have to be reserved!
1800 int amdgpu_vm_handle_moved(struct amdgpu_device
*adev
,
1801 struct amdgpu_vm
*vm
)
1803 struct amdgpu_bo_va
*bo_va
, *tmp
;
1804 struct list_head moved
;
1808 INIT_LIST_HEAD(&moved
);
1809 spin_lock(&vm
->moved_lock
);
1810 list_splice_init(&vm
->moved
, &moved
);
1811 spin_unlock(&vm
->moved_lock
);
1813 list_for_each_entry_safe(bo_va
, tmp
, &moved
, base
.vm_status
) {
1814 struct reservation_object
*resv
= bo_va
->base
.bo
->tbo
.resv
;
		/* Per VM BOs never need to be cleared in the page tables */
1817 if (resv
== vm
->root
.base
.bo
->tbo
.resv
)
1819 /* Try to reserve the BO to avoid clearing its ptes */
1820 else if (!amdgpu_vm_debug
&& reservation_object_trylock(resv
))
1822 /* Somebody else is using the BO right now */
1826 r
= amdgpu_vm_bo_update(adev
, bo_va
, clear
);
1828 spin_lock(&vm
->moved_lock
);
1829 list_splice(&moved
, &vm
->moved
);
1830 spin_unlock(&vm
->moved_lock
);
1834 if (!clear
&& resv
!= vm
->root
.base
.bo
->tbo
.resv
)
1835 reservation_object_unlock(resv
);
1843 * amdgpu_vm_bo_add - add a bo to a specific vm
1845 * @adev: amdgpu_device pointer
1847 * @bo: amdgpu buffer object
1849 * Add @bo into the requested vm.
1850 * Add @bo to the list of bos associated with the vm
1851 * Returns newly added bo_va or NULL for failure
1853 * Object has to be reserved!
1855 struct amdgpu_bo_va
*amdgpu_vm_bo_add(struct amdgpu_device
*adev
,
1856 struct amdgpu_vm
*vm
,
1857 struct amdgpu_bo
*bo
)
1859 struct amdgpu_bo_va
*bo_va
;
1861 bo_va
= kzalloc(sizeof(struct amdgpu_bo_va
), GFP_KERNEL
);
1862 if (bo_va
== NULL
) {
1865 amdgpu_vm_bo_base_init(&bo_va
->base
, vm
, bo
);
1867 bo_va
->ref_count
= 1;
1868 INIT_LIST_HEAD(&bo_va
->valids
);
1869 INIT_LIST_HEAD(&bo_va
->invalids
);
1876 * amdgpu_vm_bo_insert_mapping - insert a new mapping
1878 * @adev: amdgpu_device pointer
1879 * @bo_va: bo_va to store the address
1880 * @mapping: the mapping to insert
1882 * Insert a new mapping into all structures.
1884 static void amdgpu_vm_bo_insert_map(struct amdgpu_device
*adev
,
1885 struct amdgpu_bo_va
*bo_va
,
1886 struct amdgpu_bo_va_mapping
*mapping
)
1888 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
1889 struct amdgpu_bo
*bo
= bo_va
->base
.bo
;
1891 mapping
->bo_va
= bo_va
;
1892 list_add(&mapping
->list
, &bo_va
->invalids
);
1893 amdgpu_vm_it_insert(mapping
, &vm
->va
);
1895 if (mapping
->flags
& AMDGPU_PTE_PRT
)
1896 amdgpu_vm_prt_get(adev
);
1898 if (bo
&& bo
->tbo
.resv
== vm
->root
.base
.bo
->tbo
.resv
&&
1899 !bo_va
->base
.moved
) {
1900 spin_lock(&vm
->moved_lock
);
1901 list_move(&bo_va
->base
.vm_status
, &vm
->moved
);
1902 spin_unlock(&vm
->moved_lock
);
1904 trace_amdgpu_vm_bo_map(bo_va
, mapping
);
1908 * amdgpu_vm_bo_map - map bo inside a vm
1910 * @adev: amdgpu_device pointer
1911 * @bo_va: bo_va to store the address
1912 * @saddr: where to map the BO
1913 * @offset: requested offset in the BO
1914 * @flags: attributes of pages (read/write/valid/etc.)
 * Add a mapping of the BO at the specified addr into the VM.
1917 * Returns 0 for success, error for failure.
1919 * Object has to be reserved and unreserved outside!
1921 int amdgpu_vm_bo_map(struct amdgpu_device
*adev
,
1922 struct amdgpu_bo_va
*bo_va
,
1923 uint64_t saddr
, uint64_t offset
,
1924 uint64_t size
, uint64_t flags
)
1926 struct amdgpu_bo_va_mapping
*mapping
, *tmp
;
1927 struct amdgpu_bo
*bo
= bo_va
->base
.bo
;
1928 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
1931 /* validate the parameters */
1932 if (saddr
& AMDGPU_GPU_PAGE_MASK
|| offset
& AMDGPU_GPU_PAGE_MASK
||
1933 size
== 0 || size
& AMDGPU_GPU_PAGE_MASK
)
	/* make sure object fits at this offset */
1937 eaddr
= saddr
+ size
- 1;
1938 if (saddr
>= eaddr
||
1939 (bo
&& offset
+ size
> amdgpu_bo_size(bo
)))
1942 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
1943 eaddr
/= AMDGPU_GPU_PAGE_SIZE
;
1945 tmp
= amdgpu_vm_it_iter_first(&vm
->va
, saddr
, eaddr
);
1947 /* bo and tmp overlap, invalid addr */
1948 dev_err(adev
->dev
, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1949 "0x%010Lx-0x%010Lx\n", bo
, saddr
, eaddr
,
1950 tmp
->start
, tmp
->last
+ 1);
1954 mapping
= kmalloc(sizeof(*mapping
), GFP_KERNEL
);
1958 mapping
->start
= saddr
;
1959 mapping
->last
= eaddr
;
1960 mapping
->offset
= offset
;
1961 mapping
->flags
= flags
;
1963 amdgpu_vm_bo_insert_map(adev
, bo_va
, mapping
);
1969 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1971 * @adev: amdgpu_device pointer
1972 * @bo_va: bo_va to store the address
1973 * @saddr: where to map the BO
1974 * @offset: requested offset in the BO
1975 * @flags: attributes of pages (read/write/valid/etc.)
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
1978 * mappings as we do so.
1979 * Returns 0 for success, error for failure.
1981 * Object has to be reserved and unreserved outside!
1983 int amdgpu_vm_bo_replace_map(struct amdgpu_device
*adev
,
1984 struct amdgpu_bo_va
*bo_va
,
1985 uint64_t saddr
, uint64_t offset
,
1986 uint64_t size
, uint64_t flags
)
1988 struct amdgpu_bo_va_mapping
*mapping
;
1989 struct amdgpu_bo
*bo
= bo_va
->base
.bo
;
1993 /* validate the parameters */
1994 if (saddr
& AMDGPU_GPU_PAGE_MASK
|| offset
& AMDGPU_GPU_PAGE_MASK
||
1995 size
== 0 || size
& AMDGPU_GPU_PAGE_MASK
)
	/* make sure object fits at this offset */
1999 eaddr
= saddr
+ size
- 1;
2000 if (saddr
>= eaddr
||
2001 (bo
&& offset
+ size
> amdgpu_bo_size(bo
)))
2004 /* Allocate all the needed memory */
2005 mapping
= kmalloc(sizeof(*mapping
), GFP_KERNEL
);
2009 r
= amdgpu_vm_bo_clear_mappings(adev
, bo_va
->base
.vm
, saddr
, size
);
2015 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
2016 eaddr
/= AMDGPU_GPU_PAGE_SIZE
;
2018 mapping
->start
= saddr
;
2019 mapping
->last
= eaddr
;
2020 mapping
->offset
= offset
;
2021 mapping
->flags
= flags
;
2023 amdgpu_vm_bo_insert_map(adev
, bo_va
, mapping
);
2029 * amdgpu_vm_bo_unmap - remove bo mapping from vm
2031 * @adev: amdgpu_device pointer
2032 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 * Remove a mapping of the BO at the specified addr from the VM.
2036 * Returns 0 for success, error for failure.
2038 * Object has to be reserved and unreserved outside!
2040 int amdgpu_vm_bo_unmap(struct amdgpu_device
*adev
,
2041 struct amdgpu_bo_va
*bo_va
,
2044 struct amdgpu_bo_va_mapping
*mapping
;
2045 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
2048 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
2050 list_for_each_entry(mapping
, &bo_va
->valids
, list
) {
2051 if (mapping
->start
== saddr
)
2055 if (&mapping
->list
== &bo_va
->valids
) {
2058 list_for_each_entry(mapping
, &bo_va
->invalids
, list
) {
2059 if (mapping
->start
== saddr
)
2063 if (&mapping
->list
== &bo_va
->invalids
)
2067 list_del(&mapping
->list
);
2068 amdgpu_vm_it_remove(mapping
, &vm
->va
);
2069 mapping
->bo_va
= NULL
;
2070 trace_amdgpu_vm_bo_unmap(bo_va
, mapping
);
2073 list_add(&mapping
->list
, &vm
->freed
);
2075 amdgpu_vm_free_mapping(adev
, vm
, mapping
,
2076 bo_va
->last_pt_update
);
2082 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2084 * @adev: amdgpu_device pointer
2085 * @vm: VM structure to use
2086 * @saddr: start of the range
2087 * @size: size of the range
2089 * Remove all mappings in a range, split them as appropriate.
2090 * Returns 0 for success, error for failure.
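 *
 * Illustrative example (assumed numbers, not from the original code): if an
 * existing mapping covers GPU pages 0x100..0x4ff and the cleared range is
 * 0x200..0x2ff, the old mapping is removed and replaced by a "before"
 * mapping 0x100..0x1ff and an "after" mapping 0x300..0x4ff whose offset is
 * advanced by the distance from the old start, matching the split logic
 * below.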
2092 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device
*adev
,
2093 struct amdgpu_vm
*vm
,
2094 uint64_t saddr
, uint64_t size
)
2096 struct amdgpu_bo_va_mapping
*before
, *after
, *tmp
, *next
;
2100 eaddr
= saddr
+ size
- 1;
2101 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
2102 eaddr
/= AMDGPU_GPU_PAGE_SIZE
;
2104 /* Allocate all the needed memory */
2105 before
= kzalloc(sizeof(*before
), GFP_KERNEL
);
2108 INIT_LIST_HEAD(&before
->list
);
2110 after
= kzalloc(sizeof(*after
), GFP_KERNEL
);
2115 INIT_LIST_HEAD(&after
->list
);
2117 /* Now gather all removed mappings */
2118 tmp
= amdgpu_vm_it_iter_first(&vm
->va
, saddr
, eaddr
);
2120 /* Remember mapping split at the start */
2121 if (tmp
->start
< saddr
) {
2122 before
->start
= tmp
->start
;
2123 before
->last
= saddr
- 1;
2124 before
->offset
= tmp
->offset
;
2125 before
->flags
= tmp
->flags
;
2126 list_add(&before
->list
, &tmp
->list
);
2129 /* Remember mapping split at the end */
2130 if (tmp
->last
> eaddr
) {
2131 after
->start
= eaddr
+ 1;
2132 after
->last
= tmp
->last
;
2133 after
->offset
= tmp
->offset
;
2134 after
->offset
+= after
->start
- tmp
->start
;
2135 after
->flags
= tmp
->flags
;
2136 list_add(&after
->list
, &tmp
->list
);
2139 list_del(&tmp
->list
);
2140 list_add(&tmp
->list
, &removed
);
2142 tmp
= amdgpu_vm_it_iter_next(tmp
, saddr
, eaddr
);
2145 /* And free them up */
2146 list_for_each_entry_safe(tmp
, next
, &removed
, list
) {
2147 amdgpu_vm_it_remove(tmp
, &vm
->va
);
2148 list_del(&tmp
->list
);
2150 if (tmp
->start
< saddr
)
2152 if (tmp
->last
> eaddr
)
2156 list_add(&tmp
->list
, &vm
->freed
);
2157 trace_amdgpu_vm_bo_unmap(NULL
, tmp
);
2160 /* Insert partial mapping before the range */
2161 if (!list_empty(&before
->list
)) {
2162 amdgpu_vm_it_insert(before
, &vm
->va
);
2163 if (before
->flags
& AMDGPU_PTE_PRT
)
2164 amdgpu_vm_prt_get(adev
);
2169 /* Insert partial mapping after the range */
2170 if (!list_empty(&after
->list
)) {
2171 amdgpu_vm_it_insert(after
, &vm
->va
);
2172 if (after
->flags
& AMDGPU_PTE_PRT
)
2173 amdgpu_vm_prt_get(adev
);
2182 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2184 * @vm: the requested VM
 * Find a mapping by its address.
2188 struct amdgpu_bo_va_mapping
*amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm
*vm
,
2191 return amdgpu_vm_it_iter_first(&vm
->va
, addr
, addr
);
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2197 * @adev: amdgpu_device pointer
2198 * @bo_va: requested bo_va
2200 * Remove @bo_va->bo from the requested vm.
 * Object has to be reserved!
2204 void amdgpu_vm_bo_rmv(struct amdgpu_device
*adev
,
2205 struct amdgpu_bo_va
*bo_va
)
2207 struct amdgpu_bo_va_mapping
*mapping
, *next
;
2208 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
2210 list_del(&bo_va
->base
.bo_list
);
2212 spin_lock(&vm
->moved_lock
);
2213 list_del(&bo_va
->base
.vm_status
);
2214 spin_unlock(&vm
->moved_lock
);
2216 list_for_each_entry_safe(mapping
, next
, &bo_va
->valids
, list
) {
2217 list_del(&mapping
->list
);
2218 amdgpu_vm_it_remove(mapping
, &vm
->va
);
2219 mapping
->bo_va
= NULL
;
2220 trace_amdgpu_vm_bo_unmap(bo_va
, mapping
);
2221 list_add(&mapping
->list
, &vm
->freed
);
2223 list_for_each_entry_safe(mapping
, next
, &bo_va
->invalids
, list
) {
2224 list_del(&mapping
->list
);
2225 amdgpu_vm_it_remove(mapping
, &vm
->va
);
2226 amdgpu_vm_free_mapping(adev
, vm
, mapping
,
2227 bo_va
->last_pt_update
);
2230 dma_fence_put(bo_va
->last_pt_update
);
/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 * @evicted: is the BO evicted
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted)
{
	struct amdgpu_vm_bo_base *bo_base;

	/* shadow bo doesn't have bo base, its validation needs its parent */
	if (bo->parent && bo->parent->shadow == bo)
		bo = bo->parent;

	list_for_each_entry(bo_base, &bo->va, bo_list) {
		struct amdgpu_vm *vm = bo_base->vm;
		bool was_moved = bo_base->moved;

		bo_base->moved = true;
		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
			if (bo->tbo.type == ttm_bo_type_kernel)
				list_move(&bo_base->vm_status, &vm->evicted);
			else
				list_move_tail(&bo_base->vm_status,
					       &vm->evicted);
			continue;
		}

		if (was_moved)
			continue;

		if (bo->tbo.type == ttm_bo_type_kernel) {
			list_move(&bo_base->vm_status, &vm->relocated);
		} else {
			spin_lock(&bo_base->vm->moved_lock);
			list_move(&bo_base->vm_status, &vm->moved);
			spin_unlock(&bo_base->vm->moved_lock);
		}
	}
}
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	   Above that split equal between PD and PTs */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}
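/*
 * Worked example (illustrative): for an 8 GB address space, bits =
 * ilog2(8) + 18 = 21 and the PD stays 4K, so block_size = 21 - 9 = 12.
 * For 64 GB, bits = ilog2(64) + 18 = 24 and the bits are split roughly
 * evenly between PD and PTs, so block_size = (24 + 3) / 2 = 13.
 */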
/**
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
 * @vm_size: the default vm size if it's set auto
 * @fragment_size_default: default PTE fragment size
 * @max_level: max VMPT level
 * @max_bits: max address space size in bits
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits)
{
	uint64_t tmp;

	/* adjust vm size first */
	if (amdgpu_vm_size != -1) {
		unsigned max_size = 1 << (max_bits - 30);

		vm_size = amdgpu_vm_size;
		if (vm_size > max_size) {
			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
				 amdgpu_vm_size, max_size);
			vm_size = max_size;
		}
	}

	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;

	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
	if (amdgpu_vm_block_size != -1)
		tmp >>= amdgpu_vm_block_size - 9;
	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
	switch (adev->vm_manager.num_level) {
	case 3:
		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
		break;
	case 2:
		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
		break;
	case 1:
		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
		break;
	default:
		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
	}
	/* block size depends on vm size and hw setup */
	if (amdgpu_vm_block_size != -1)
		adev->vm_manager.block_size =
			min((unsigned)amdgpu_vm_block_size, max_bits
			    - AMDGPU_GPU_PAGE_SHIFT
			    - 9 * adev->vm_manager.num_level);
	else if (adev->vm_manager.num_level > 1)
		adev->vm_manager.block_size = 9;
	else
		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);

	if (amdgpu_vm_fragment_size == -1)
		adev->vm_manager.fragment_size = fragment_size_default;
	else
		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;

	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
		 vm_size, adev->vm_manager.num_level + 1,
		 adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);
}
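/*
 * Worked example (illustrative), assuming no module parameter overrides and
 * max_level >= 2: for vm_size = 256 GB, max_pfn = 256 << 18 = 1 << 26 pages,
 * so fls64(max_pfn) - 1 = 26, tmp = DIV_ROUND_UP(26, 9) - 1 = 2 and
 * num_level = 2 with root_level = AMDGPU_VM_PDB1.  Because num_level > 1 the
 * block size defaults to 9 bits, giving "vm size is 256 GB, 3 levels,
 * block size is 9-bit" in the message above.
 */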
/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
 * @pasid: Process address space identifier
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid)
{
	struct amdgpu_bo_param bp;
	struct amdgpu_bo *root;
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
				   AMDGPU_VM_PTE_COUNT(adev) * 8);
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long size;
	uint64_t flags;
	int r, i;

	vm->va = RB_ROOT_CACHED;
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	INIT_LIST_HEAD(&vm->evicted);
	INIT_LIST_HEAD(&vm->relocated);
	spin_lock_init(&vm->moved_lock);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->idle);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entity for page table updates */
	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &vm->entity,
				  rq, NULL);
	if (r)
		return r;

	vm->pte_support_ats = false;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_COMPUTE);

		if (adev->asic_type == CHIP_RAVEN)
			vm->pte_support_ats = true;
	} else {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_GFX);
	}
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
		  "CPU update of VM recommended only for large BAR system\n");
	vm->last_update = NULL;

	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else
		flags |= AMDGPU_GEM_CREATE_SHADOW;

	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = flags;
	bp.type = ttm_bo_type_kernel;
	r = amdgpu_bo_create(adev, &bp, &root);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(root, true);
	if (r)
		goto error_free_root;

	r = amdgpu_vm_clear_bo(adev, vm, root,
			       adev->vm_manager.root_level,
			       vm->pte_support_ats);
	if (r)
		goto error_unreserve;

	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
	amdgpu_bo_unreserve(vm->root.base.bo);

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
		if (r < 0)
			goto error_free_root;

		vm->pasid = pasid;
	}

	INIT_KFIFO(vm->faults);
	vm->fault_credit = 16;

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(vm->root.base.bo);

error_free_root:
	amdgpu_bo_unref(&vm->root.base.bo->shadow);
	amdgpu_bo_unref(&vm->root.base.bo);
	vm->root.base.bo = NULL;

error_free_sched_entity:
	drm_sched_entity_fini(&ring->sched, &vm->entity);

	return r;
}
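/*
 * Hypothetical life-cycle sketch (not part of the driver): a GFX VM is
 * typically created without a PASID when a file is opened and torn down
 * again on release.  The wrapper function below and its name are assumptions
 * for illustration only.
 */
#if 0
static int example_vm_lifecycle(struct amdgpu_device *adev)
{
	struct amdgpu_vm vm;
	int r;

	r = amdgpu_vm_init(adev, &vm, AMDGPU_VM_CONTEXT_GFX, 0);
	if (r)
		return r;

	/* ...map BOs and submit command buffers against the VM here... */

	amdgpu_vm_fini(adev, &vm);
	return 0;
}
#endif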
/**
 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
 *
 * This only works on GFX VMs that don't have any BOs added and no
 * page tables allocated yet.
 *
 * Changes the following VM parameters:
 * - use_cpu_for_update
 * - pte_supports_ats
 * - pasid (old PASID is released, because compute manages its own PASIDs)
 *
 * Reinitializes the page directory to reflect the changed ATS
 * setting. May leave behind an unused shadow BO for the page
 * directory when switching from SDMA updates to CPU updates.
 *
 * Returns 0 for success, -errno for errors.
 */
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
	int r;

	r = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (r)
		return r;

	/* Sanity checks */
	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
		r = -EINVAL;
		goto error;
	}

	/* Check if PD needs to be reinitialized and do it before
	 * changing any other state, in case it fails.
	 */
	if (pte_support_ats != vm->pte_support_ats) {
		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
				       adev->vm_manager.root_level,
				       pte_support_ats);
		if (r)
			goto error;
	}

	/* Update VM state */
	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
	vm->pte_support_ats = pte_support_ats;
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
		  "CPU update of VM recommended only for large BAR system\n");

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

		vm->pasid = 0;
	}

error:
	amdgpu_bo_unreserve(vm->root.base.bo);
	return r;
}
/**
 * amdgpu_vm_free_levels - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @parent: PD/PT starting level to free
 * @level: level of parent structure
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt *parent,
				  unsigned level)
{
	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);

	if (parent->base.bo) {
		list_del(&parent->base.bo_list);
		list_del(&parent->base.vm_status);
		amdgpu_bo_unref(&parent->base.bo->shadow);
		amdgpu_bo_unref(&parent->base.bo);
	}

	if (parent->entries)
		for (i = 0; i < num_entries; i++)
			amdgpu_vm_free_levels(adev, &parent->entries[i],
					      level + 1);

	kvfree(parent->entries);
}
/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
	struct amdgpu_bo *root;
	u64 fault;
	int i, r;

	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

	/* Clear pending page faults from IH when the VM is destroyed */
	while (kfifo_get(&vm->faults, &fault))
		amdgpu_ih_clear_fault(adev, fault);

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}

	drm_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp,
					     &vm->va.rb_root, rb) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	root = amdgpu_bo_ref(vm->root.base.bo);
	r = amdgpu_bo_reserve(root, true);
	if (r) {
		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
	} else {
		amdgpu_vm_free_levels(adev, &vm->root,
				      adev->vm_manager.root_level);
		amdgpu_bo_unreserve(root);
	}
	amdgpu_bo_unref(&root);
	dma_fence_put(vm->last_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vmid_free_reserved(adev, vm, i);
}
/**
 * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
 *
 * @adev: amdgpu_device pointer
 * @pasid: PASID to identify the VM
 *
 * This function is expected to be called in interrupt context. Returns
 * true if there was fault credit, false otherwise
 */
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
				  unsigned int pasid)
{
	struct amdgpu_vm *vm;

	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (!vm) {
		/* VM not found, can't track fault credit */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return true;
	}

	/* No lock needed. only accessed by IRQ handler */
	if (!vm->fault_credit) {
		/* Too many faults in this VM */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return false;
	}

	vm->fault_credit--;
	spin_unlock(&adev->vm_manager.pasid_lock);
	return true;
}
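/*
 * Hypothetical usage sketch (not part of the driver): an interrupt handler
 * could consult the fault credit to decide whether a retry fault is still
 * worth queueing for deferred processing.  The handler name and the queueing
 * step are assumptions for illustration only.
 */
#if 0
static void example_handle_vm_fault(struct amdgpu_device *adev,
				    unsigned int pasid, uint64_t addr)
{
	/* Drop the fault once the VM has used up its credit of 16 faults */
	if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
		return;

	/* ...queue (pasid, addr) for deferred fault handling here... */
}
#endif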
/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	amdgpu_vmid_mgr_init(adev);

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* If not overridden by the user, by default, only in large BAR systems
	 * Compute VM tables will be updated by CPU
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_vm_is_large_bar(adev))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif

	idr_init(&adev->vm_manager.pasid_idr);
	spin_lock_init(&adev->vm_manager.pasid_lock);
}
/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
	idr_destroy(&adev->vm_manager.pasid_idr);

	amdgpu_vmid_mgr_fini(adev);
}
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently, we only need to reserve a vmid from the gfxhub */
		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB
);