2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
31 #include <drm/amdgpu_drm.h>
33 #include "amdgpu_trace.h"
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and the system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
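 *
 * Illustrative sketch (not driver code; the 9-bit block size and the helper
 * names are assumptions for the example only): how a GPU virtual page number
 * is split into a page-table index and an entry index for one level:
 *
 *	static unsigned example_pt_index(uint64_t vpfn, unsigned block_size)
 *	{
 *		return vpfn >> block_size;                /* which page table */
 *	}
 *	static unsigned example_pte_index(uint64_t vpfn, unsigned block_size)
 *	{
 *		return vpfn & ((1ULL << block_size) - 1); /* entry inside it */
 *	}
 *
 *	e.g. with block_size = 9, GPU page 0x12345 lands in page table 0x91
 *	(0x12345 >> 9), entry 0x145 (0x12345 & 0x1ff).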
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)
/* Local structure. Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
	/* amdgpu device we do this update for */
	struct amdgpu_device *adev;
	/* optional amdgpu_vm we do this update for */
	struct amdgpu_vm *vm;
	/* address where to copy page table entries from */
	uint64_t src;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
	/* Function which actually does the update */
	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
	/* indicate update pt or its shadow */
	bool shadow;
	/* The next two are used during VM update by CPU
	 *  DMA addresses to use for mapping
	 *  Kernel pointer of PD/PT BO that needs to be updated
	 */
	dma_addr_t *pages_addr;
	void *kptr;
};
/* Helper to disable partial resident texture feature from a fence callback */
struct amdgpu_prt_cb {
	struct amdgpu_device *adev;
	struct dma_fence_cb cb;
};
/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	if (level == 0)
		/* For the root directory */
		return adev->vm_manager.max_pfn >>
			(adev->vm_manager.block_size *
			 adev->vm_manager.num_level);
	else if (level == adev->vm_manager.num_level)
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
	else
		/* Everything in between */
		return 1 << adev->vm_manager.block_size;
}
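
/* Worked example (numbers assumed for illustration only): with
 * max_pfn = 1 << 20, block_size = 9 and num_level = 2, the root directory
 * holds (1 << 20) >> (9 * 2) = 4 entries, a leaf page table holds
 * AMDGPU_VM_PTE_COUNT entries (typically 1 << 9 = 512), and every level in
 * between holds 1 << 9 = 512 entries.
 */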
/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->root.bo;
	entry->tv.bo = &entry->robj->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}
 * amdgpu_vm_validate_level - validate a single page table level
 *
 * @parent: parent page table level
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
162 static int amdgpu_vm_validate_level(struct amdgpu_vm_pt
*parent
,
163 int (*validate
)(void *, struct amdgpu_bo
*),
169 if (!parent
->entries
)
172 for (i
= 0; i
<= parent
->last_entry_used
; ++i
) {
173 struct amdgpu_vm_pt
*entry
= &parent
->entries
[i
];
178 r
= validate(param
, entry
->bo
);
183 * Recurse into the sub directory. This is harmless because we
184 * have only a maximum of 5 layers.
186 r
= amdgpu_vm_validate_level(entry
, validate
, param
);
195 * amdgpu_vm_validate_pt_bos - validate the page table BOs
197 * @adev: amdgpu device pointer
198 * @vm: vm providing the BOs
199 * @validate: callback to do the validation
200 * @param: parameter for the validation callback
 * Validate the page table BOs on command submission if necessary.
204 int amdgpu_vm_validate_pt_bos(struct amdgpu_device
*adev
, struct amdgpu_vm
*vm
,
205 int (*validate
)(void *p
, struct amdgpu_bo
*bo
),
208 uint64_t num_evictions
;
210 /* We only need to validate the page tables
211 * if they aren't already valid.
213 num_evictions
= atomic64_read(&adev
->num_evictions
);
214 if (num_evictions
== vm
->last_eviction_counter
)
217 return amdgpu_vm_validate_level(&vm
->root
, validate
, param
);
221 * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail
223 * @adev: amdgpu device instance
224 * @vm: vm providing the BOs
226 * Move the PT BOs to the tail of the LRU.
228 static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt
*parent
)
232 if (!parent
->entries
)
235 for (i
= 0; i
<= parent
->last_entry_used
; ++i
) {
236 struct amdgpu_vm_pt
*entry
= &parent
->entries
[i
];
241 ttm_bo_move_to_lru_tail(&entry
->bo
->tbo
);
242 amdgpu_vm_move_level_in_lru(entry
);
247 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
249 * @adev: amdgpu device instance
250 * @vm: vm providing the BOs
252 * Move the PT BOs to the tail of the LRU.
254 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device
*adev
,
255 struct amdgpu_vm
*vm
)
257 struct ttm_bo_global
*glob
= adev
->mman
.bdev
.glob
;
259 spin_lock(&glob
->lru_lock
);
260 amdgpu_vm_move_level_in_lru(&vm
->root
);
261 spin_unlock(&glob
->lru_lock
);
265 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
267 * @adev: amdgpu_device pointer
269 * @saddr: start of the address range
270 * @eaddr: end of the address range
272 * Make sure the page directories and page tables are allocated
274 static int amdgpu_vm_alloc_levels(struct amdgpu_device
*adev
,
275 struct amdgpu_vm
*vm
,
276 struct amdgpu_vm_pt
*parent
,
277 uint64_t saddr
, uint64_t eaddr
,
280 unsigned shift
= (adev
->vm_manager
.num_level
- level
) *
281 adev
->vm_manager
.block_size
;
282 unsigned pt_idx
, from
, to
;
286 if (!parent
->entries
) {
287 unsigned num_entries
= amdgpu_vm_num_entries(adev
, level
);
289 parent
->entries
= kvmalloc_array(num_entries
,
290 sizeof(struct amdgpu_vm_pt
),
291 GFP_KERNEL
| __GFP_ZERO
);
292 if (!parent
->entries
)
294 memset(parent
->entries
, 0 , sizeof(struct amdgpu_vm_pt
));
297 from
= saddr
>> shift
;
299 if (from
>= amdgpu_vm_num_entries(adev
, level
) ||
300 to
>= amdgpu_vm_num_entries(adev
, level
))
303 if (to
> parent
->last_entry_used
)
304 parent
->last_entry_used
= to
;
307 saddr
= saddr
& ((1 << shift
) - 1);
308 eaddr
= eaddr
& ((1 << shift
) - 1);
310 flags
= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS
|
311 AMDGPU_GEM_CREATE_VRAM_CLEARED
;
312 if (vm
->use_cpu_for_update
)
313 flags
|= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
;
315 flags
|= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS
|
316 AMDGPU_GEM_CREATE_SHADOW
);
318 /* walk over the address space and allocate the page tables */
319 for (pt_idx
= from
; pt_idx
<= to
; ++pt_idx
) {
320 struct reservation_object
*resv
= vm
->root
.bo
->tbo
.resv
;
321 struct amdgpu_vm_pt
*entry
= &parent
->entries
[pt_idx
];
322 struct amdgpu_bo
*pt
;
325 r
= amdgpu_bo_create(adev
,
326 amdgpu_vm_bo_size(adev
, level
),
327 AMDGPU_GPU_PAGE_SIZE
, true,
328 AMDGPU_GEM_DOMAIN_VRAM
,
334 /* Keep a reference to the root directory to avoid
335 * freeing them up in the wrong order.
337 pt
->parent
= amdgpu_bo_ref(vm
->root
.bo
);
343 if (level
< adev
->vm_manager
.num_level
) {
344 uint64_t sub_saddr
= (pt_idx
== from
) ? saddr
: 0;
345 uint64_t sub_eaddr
= (pt_idx
== to
) ? eaddr
:
347 r
= amdgpu_vm_alloc_levels(adev
, vm
, entry
, sub_saddr
,
358 * amdgpu_vm_alloc_pts - Allocate page tables.
360 * @adev: amdgpu_device pointer
361 * @vm: VM to allocate page tables for
362 * @saddr: Start address which needs to be allocated
363 * @size: Size from start address we need.
365 * Make sure the page tables are allocated.
367 int amdgpu_vm_alloc_pts(struct amdgpu_device
*adev
,
368 struct amdgpu_vm
*vm
,
369 uint64_t saddr
, uint64_t size
)
374 /* validate the parameters */
375 if (saddr
& AMDGPU_GPU_PAGE_MASK
|| size
& AMDGPU_GPU_PAGE_MASK
)
378 eaddr
= saddr
+ size
- 1;
379 last_pfn
= eaddr
/ AMDGPU_GPU_PAGE_SIZE
;
380 if (last_pfn
>= adev
->vm_manager
.max_pfn
) {
381 dev_err(adev
->dev
, "va above limit (0x%08llX >= 0x%08llX)\n",
382 last_pfn
, adev
->vm_manager
.max_pfn
);
386 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
387 eaddr
/= AMDGPU_GPU_PAGE_SIZE
;
389 return amdgpu_vm_alloc_levels(adev
, vm
, &vm
->root
, saddr
, eaddr
, 0);
/**
 * amdgpu_vm_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if GPU reset occurred since last use of the VMID.
 */
static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
				    struct amdgpu_vm_id *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}
static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
{
	return !!vm->reserved_vmid[vmhub];
}
412 /* idr_mgr->lock must be held */
413 static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm
*vm
,
414 struct amdgpu_ring
*ring
,
415 struct amdgpu_sync
*sync
,
416 struct dma_fence
*fence
,
417 struct amdgpu_job
*job
)
419 struct amdgpu_device
*adev
= ring
->adev
;
420 unsigned vmhub
= ring
->funcs
->vmhub
;
421 uint64_t fence_context
= adev
->fence_context
+ ring
->idx
;
422 struct amdgpu_vm_id
*id
= vm
->reserved_vmid
[vmhub
];
423 struct amdgpu_vm_id_manager
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
424 struct dma_fence
*updates
= sync
->last_vm_update
;
426 struct dma_fence
*flushed
, *tmp
;
427 bool needs_flush
= false;
429 flushed
= id
->flushed_updates
;
430 if ((amdgpu_vm_had_gpu_reset(adev
, id
)) ||
431 (atomic64_read(&id
->owner
) != vm
->client_id
) ||
432 (job
->vm_pd_addr
!= id
->pd_gpu_addr
) ||
433 (updates
&& (!flushed
|| updates
->context
!= flushed
->context
||
434 dma_fence_is_later(updates
, flushed
))) ||
435 (!id
->last_flush
|| (id
->last_flush
->context
!= fence_context
&&
436 !dma_fence_is_signaled(id
->last_flush
)))) {
438 /* to prevent one context starved by another context */
440 tmp
= amdgpu_sync_peek_fence(&id
->active
, ring
);
442 r
= amdgpu_sync_fence(adev
, sync
, tmp
);
	/* Good we can use this VMID. Remember this submission as
	 * user of the VMID. */
450 r
= amdgpu_sync_fence(ring
->adev
, &id
->active
, fence
);
454 if (updates
&& (!flushed
|| updates
->context
!= flushed
->context
||
455 dma_fence_is_later(updates
, flushed
))) {
456 dma_fence_put(id
->flushed_updates
);
457 id
->flushed_updates
= dma_fence_get(updates
);
459 id
->pd_gpu_addr
= job
->vm_pd_addr
;
460 atomic64_set(&id
->owner
, vm
->client_id
);
461 job
->vm_needs_flush
= needs_flush
;
463 dma_fence_put(id
->last_flush
);
464 id
->last_flush
= NULL
;
466 job
->vm_id
= id
- id_mgr
->ids
;
467 trace_amdgpu_vm_grab_id(vm
, ring
, job
);
473 * amdgpu_vm_grab_id - allocate the next free VMID
475 * @vm: vm to allocate id for
476 * @ring: ring we want to submit job to
477 * @sync: sync object where we add dependencies
478 * @fence: fence protecting ID from reuse
480 * Allocate an id for the vm, adding fences to the sync obj as necessary.
482 int amdgpu_vm_grab_id(struct amdgpu_vm
*vm
, struct amdgpu_ring
*ring
,
483 struct amdgpu_sync
*sync
, struct dma_fence
*fence
,
484 struct amdgpu_job
*job
)
486 struct amdgpu_device
*adev
= ring
->adev
;
487 unsigned vmhub
= ring
->funcs
->vmhub
;
488 struct amdgpu_vm_id_manager
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
489 uint64_t fence_context
= adev
->fence_context
+ ring
->idx
;
490 struct dma_fence
*updates
= sync
->last_vm_update
;
491 struct amdgpu_vm_id
*id
, *idle
;
492 struct dma_fence
**fences
;
496 mutex_lock(&id_mgr
->lock
);
497 if (amdgpu_vm_reserved_vmid_ready(vm
, vmhub
)) {
498 r
= amdgpu_vm_grab_reserved_vmid_locked(vm
, ring
, sync
, fence
, job
);
499 mutex_unlock(&id_mgr
->lock
);
502 fences
= kmalloc_array(sizeof(void *), id_mgr
->num_ids
, GFP_KERNEL
);
504 mutex_unlock(&id_mgr
->lock
);
507 /* Check if we have an idle VMID */
509 list_for_each_entry(idle
, &id_mgr
->ids_lru
, list
) {
510 fences
[i
] = amdgpu_sync_peek_fence(&idle
->active
, ring
);
	/* If we can't find an idle VMID to use, wait till one becomes available */
517 if (&idle
->list
== &id_mgr
->ids_lru
) {
518 u64 fence_context
= adev
->vm_manager
.fence_context
+ ring
->idx
;
519 unsigned seqno
= ++adev
->vm_manager
.seqno
[ring
->idx
];
520 struct dma_fence_array
*array
;
523 for (j
= 0; j
< i
; ++j
)
524 dma_fence_get(fences
[j
]);
526 array
= dma_fence_array_create(i
, fences
, fence_context
,
529 for (j
= 0; j
< i
; ++j
)
530 dma_fence_put(fences
[j
]);
537 r
= amdgpu_sync_fence(ring
->adev
, sync
, &array
->base
);
538 dma_fence_put(&array
->base
);
542 mutex_unlock(&id_mgr
->lock
);
548 job
->vm_needs_flush
= false;
549 /* Check if we can use a VMID already assigned to this VM */
550 list_for_each_entry_reverse(id
, &id_mgr
->ids_lru
, list
) {
551 struct dma_fence
*flushed
;
552 bool needs_flush
= false;
554 /* Check all the prerequisites to using this VMID */
555 if (amdgpu_vm_had_gpu_reset(adev
, id
))
558 if (atomic64_read(&id
->owner
) != vm
->client_id
)
561 if (job
->vm_pd_addr
!= id
->pd_gpu_addr
)
564 if (!id
->last_flush
||
565 (id
->last_flush
->context
!= fence_context
&&
566 !dma_fence_is_signaled(id
->last_flush
)))
569 flushed
= id
->flushed_updates
;
570 if (updates
&& (!flushed
|| dma_fence_is_later(updates
, flushed
)))
573 /* Concurrent flushes are only possible starting with Vega10 */
574 if (adev
->asic_type
< CHIP_VEGA10
&& needs_flush
)
		/* Good we can use this VMID. Remember this submission as
		 * user of the VMID. */
580 r
= amdgpu_sync_fence(ring
->adev
, &id
->active
, fence
);
584 if (updates
&& (!flushed
|| dma_fence_is_later(updates
, flushed
))) {
585 dma_fence_put(id
->flushed_updates
);
586 id
->flushed_updates
= dma_fence_get(updates
);
592 goto no_flush_needed
;
596 /* Still no ID to use? Then use the idle one found earlier */
599 /* Remember this submission as user of the VMID */
600 r
= amdgpu_sync_fence(ring
->adev
, &id
->active
, fence
);
604 id
->pd_gpu_addr
= job
->vm_pd_addr
;
605 dma_fence_put(id
->flushed_updates
);
606 id
->flushed_updates
= dma_fence_get(updates
);
607 atomic64_set(&id
->owner
, vm
->client_id
);
610 job
->vm_needs_flush
= true;
611 dma_fence_put(id
->last_flush
);
612 id
->last_flush
= NULL
;
615 list_move_tail(&id
->list
, &id_mgr
->ids_lru
);
617 job
->vm_id
= id
- id_mgr
->ids
;
618 trace_amdgpu_vm_grab_id(vm
, ring
, job
);
621 mutex_unlock(&id_mgr
->lock
);
625 static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device
*adev
,
626 struct amdgpu_vm
*vm
,
629 struct amdgpu_vm_id_manager
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
631 mutex_lock(&id_mgr
->lock
);
632 if (vm
->reserved_vmid
[vmhub
]) {
633 list_add(&vm
->reserved_vmid
[vmhub
]->list
,
635 vm
->reserved_vmid
[vmhub
] = NULL
;
636 atomic_dec(&id_mgr
->reserved_vmid_num
);
638 mutex_unlock(&id_mgr
->lock
);
641 static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device
*adev
,
642 struct amdgpu_vm
*vm
,
645 struct amdgpu_vm_id_manager
*id_mgr
;
646 struct amdgpu_vm_id
*idle
;
649 id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
650 mutex_lock(&id_mgr
->lock
);
651 if (vm
->reserved_vmid
[vmhub
])
653 if (atomic_inc_return(&id_mgr
->reserved_vmid_num
) >
654 AMDGPU_VM_MAX_RESERVED_VMID
) {
655 DRM_ERROR("Over limitation of reserved vmid\n");
656 atomic_dec(&id_mgr
->reserved_vmid_num
);
660 /* Select the first entry VMID */
661 idle
= list_first_entry(&id_mgr
->ids_lru
, struct amdgpu_vm_id
, list
);
662 list_del_init(&idle
->list
);
663 vm
->reserved_vmid
[vmhub
] = idle
;
664 mutex_unlock(&id_mgr
->lock
);
668 mutex_unlock(&id_mgr
->lock
);
673 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
675 * @adev: amdgpu_device pointer
677 void amdgpu_vm_check_compute_bug(struct amdgpu_device
*adev
)
679 const struct amdgpu_ip_block
*ip_block
;
680 bool has_compute_vm_bug
;
681 struct amdgpu_ring
*ring
;
684 has_compute_vm_bug
= false;
686 ip_block
= amdgpu_get_ip_block(adev
, AMD_IP_BLOCK_TYPE_GFX
);
	/* Compute has a VM bug for GFX version < 7.
	 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
	 */
690 if (ip_block
->version
->major
<= 7)
691 has_compute_vm_bug
= true;
692 else if (ip_block
->version
->major
== 8)
693 if (adev
->gfx
.mec_fw_version
< 673)
694 has_compute_vm_bug
= true;
697 for (i
= 0; i
< adev
->num_rings
; i
++) {
698 ring
= adev
->rings
[i
];
699 if (ring
->funcs
->type
== AMDGPU_RING_TYPE_COMPUTE
)
700 /* only compute rings */
701 ring
->has_compute_vm_bug
= has_compute_vm_bug
;
703 ring
->has_compute_vm_bug
= false;
707 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring
*ring
,
708 struct amdgpu_job
*job
)
710 struct amdgpu_device
*adev
= ring
->adev
;
711 unsigned vmhub
= ring
->funcs
->vmhub
;
712 struct amdgpu_vm_id_manager
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
713 struct amdgpu_vm_id
*id
;
714 bool gds_switch_needed
;
715 bool vm_flush_needed
= job
->vm_needs_flush
|| ring
->has_compute_vm_bug
;
719 id
= &id_mgr
->ids
[job
->vm_id
];
720 gds_switch_needed
= ring
->funcs
->emit_gds_switch
&& (
721 id
->gds_base
!= job
->gds_base
||
722 id
->gds_size
!= job
->gds_size
||
723 id
->gws_base
!= job
->gws_base
||
724 id
->gws_size
!= job
->gws_size
||
725 id
->oa_base
!= job
->oa_base
||
726 id
->oa_size
!= job
->oa_size
);
728 if (amdgpu_vm_had_gpu_reset(adev
, id
))
731 return vm_flush_needed
|| gds_switch_needed
;
static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
{
	return (adev->mc.real_vram_size == adev->mc.visible_vram_size);
}
740 * amdgpu_vm_flush - hardware flush the vm
742 * @ring: ring to use for flush
743 * @vm_id: vmid number to use
744 * @pd_addr: address of the page directory
746 * Emit a VM flush when it is necessary.
748 int amdgpu_vm_flush(struct amdgpu_ring
*ring
, struct amdgpu_job
*job
)
750 struct amdgpu_device
*adev
= ring
->adev
;
751 unsigned vmhub
= ring
->funcs
->vmhub
;
752 struct amdgpu_vm_id_manager
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
753 struct amdgpu_vm_id
*id
= &id_mgr
->ids
[job
->vm_id
];
754 bool gds_switch_needed
= ring
->funcs
->emit_gds_switch
&& (
755 id
->gds_base
!= job
->gds_base
||
756 id
->gds_size
!= job
->gds_size
||
757 id
->gws_base
!= job
->gws_base
||
758 id
->gws_size
!= job
->gws_size
||
759 id
->oa_base
!= job
->oa_base
||
760 id
->oa_size
!= job
->oa_size
);
761 bool vm_flush_needed
= job
->vm_needs_flush
;
762 unsigned patch_offset
= 0;
765 if (amdgpu_vm_had_gpu_reset(adev
, id
)) {
766 gds_switch_needed
= true;
767 vm_flush_needed
= true;
770 if (!vm_flush_needed
&& !gds_switch_needed
)
773 if (ring
->funcs
->init_cond_exec
)
774 patch_offset
= amdgpu_ring_init_cond_exec(ring
);
776 if (ring
->funcs
->emit_vm_flush
&& vm_flush_needed
) {
777 struct dma_fence
*fence
;
779 trace_amdgpu_vm_flush(ring
, job
->vm_id
, job
->vm_pd_addr
);
780 amdgpu_ring_emit_vm_flush(ring
, job
->vm_id
, job
->vm_pd_addr
);
782 r
= amdgpu_fence_emit(ring
, &fence
);
786 mutex_lock(&id_mgr
->lock
);
787 dma_fence_put(id
->last_flush
);
788 id
->last_flush
= fence
;
789 id
->current_gpu_reset_count
= atomic_read(&adev
->gpu_reset_counter
);
790 mutex_unlock(&id_mgr
->lock
);
793 if (ring
->funcs
->emit_gds_switch
&& gds_switch_needed
) {
794 id
->gds_base
= job
->gds_base
;
795 id
->gds_size
= job
->gds_size
;
796 id
->gws_base
= job
->gws_base
;
797 id
->gws_size
= job
->gws_size
;
798 id
->oa_base
= job
->oa_base
;
799 id
->oa_size
= job
->oa_size
;
800 amdgpu_ring_emit_gds_switch(ring
, job
->vm_id
, job
->gds_base
,
801 job
->gds_size
, job
->gws_base
,
802 job
->gws_size
, job
->oa_base
,
806 if (ring
->funcs
->patch_cond_exec
)
807 amdgpu_ring_patch_cond_exec(ring
, patch_offset
);
809 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
810 if (ring
->funcs
->emit_switch_buffer
) {
811 amdgpu_ring_emit_switch_buffer(ring
);
812 amdgpu_ring_emit_switch_buffer(ring
);
818 * amdgpu_vm_reset_id - reset VMID to zero
820 * @adev: amdgpu device structure
821 * @vm_id: vmid number to use
 * Reset saved GDS, GWS and OA to force switch on next flush.
825 void amdgpu_vm_reset_id(struct amdgpu_device
*adev
, unsigned vmhub
,
828 struct amdgpu_vm_id_manager
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
829 struct amdgpu_vm_id
*id
= &id_mgr
->ids
[vmid
];
831 atomic64_set(&id
->owner
, 0);
 * amdgpu_vm_reset_all_ids - reset all VMIDs to zero
843 * @adev: amdgpu device structure
845 * Reset VMID to force flush on next use
847 void amdgpu_vm_reset_all_ids(struct amdgpu_device
*adev
)
851 for (i
= 0; i
< AMDGPU_MAX_VMHUBS
; ++i
) {
852 struct amdgpu_vm_id_manager
*id_mgr
=
853 &adev
->vm_manager
.id_mgr
[i
];
855 for (j
= 1; j
< id_mgr
->num_ids
; ++j
)
856 amdgpu_vm_reset_id(adev
, i
, j
);
861 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
864 * @bo: requested buffer object
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
870 * Object has to be reserved!
872 struct amdgpu_bo_va
*amdgpu_vm_bo_find(struct amdgpu_vm
*vm
,
873 struct amdgpu_bo
*bo
)
875 struct amdgpu_bo_va
*bo_va
;
877 list_for_each_entry(bo_va
, &bo
->va
, bo_list
) {
878 if (bo_va
->vm
== vm
) {
886 * amdgpu_vm_do_set_ptes - helper to call the right asic function
888 * @params: see amdgpu_pte_update_params definition
889 * @pe: addr of the page entry
890 * @addr: dst addr to write into pe
891 * @count: number of page entries to update
892 * @incr: increase next addr by incr bytes
893 * @flags: hw access flags
895 * Traces the parameters and calls the right asic functions
896 * to setup the page table using the DMA.
898 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params
*params
,
899 uint64_t pe
, uint64_t addr
,
900 unsigned count
, uint32_t incr
,
903 trace_amdgpu_vm_set_ptes(pe
, addr
, count
, incr
, flags
);
906 amdgpu_vm_write_pte(params
->adev
, params
->ib
, pe
,
907 addr
| flags
, count
, incr
);
910 amdgpu_vm_set_pte_pde(params
->adev
, params
->ib
, pe
, addr
,
916 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
918 * @params: see amdgpu_pte_update_params definition
919 * @pe: addr of the page entry
920 * @addr: dst addr to write into pe
921 * @count: number of page entries to update
922 * @incr: increase next addr by incr bytes
923 * @flags: hw access flags
925 * Traces the parameters and calls the DMA function to copy the PTEs.
927 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params
*params
,
928 uint64_t pe
, uint64_t addr
,
929 unsigned count
, uint32_t incr
,
932 uint64_t src
= (params
->src
+ (addr
>> 12) * 8);
935 trace_amdgpu_vm_copy_ptes(pe
, src
, count
);
937 amdgpu_vm_copy_pte(params
->adev
, params
->ib
, pe
, src
, count
);
941 * amdgpu_vm_map_gart - Resolve gart mapping of addr
943 * @pages_addr: optional DMA address to use for lookup
944 * @addr: the unmapped addr
946 * Look up the physical address of the page that the pte resolves
947 * to and return the pointer for the page table entry.
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
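
/* Worked example (values assumed for illustration): with 64K CPU pages and
 * 4K GPU pages, addr = 0x12345 selects pages_addr[0x1] (the CPU page the
 * address falls in), the in-page offset 0x2345 is OR'ed back in, and the
 * final mask keeps only the 4K-aligned part, so the result points at the
 * 4K GPU page at offset 0x2000 inside that CPU page. With 4K CPU pages the
 * offset is entirely below bit 12 and the mask removes it again.
 */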
965 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
967 * @params: see amdgpu_pte_update_params definition
968 * @pe: kmap addr of the page entry
969 * @addr: dst addr to write into pe
970 * @count: number of page entries to update
971 * @incr: increase next addr by incr bytes
972 * @flags: hw access flags
974 * Write count number of PT/PD entries directly.
976 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params
*params
,
977 uint64_t pe
, uint64_t addr
,
978 unsigned count
, uint32_t incr
,
984 for (i
= 0; i
< count
; i
++) {
985 value
= params
->pages_addr
?
986 amdgpu_vm_map_gart(params
->pages_addr
, addr
) :
988 amdgpu_gart_set_pte_pde(params
->adev
, (void *)(uintptr_t)pe
,
995 amdgpu_gart_flush_gpu_tlb(params
->adev
, 0);
998 static int amdgpu_vm_bo_wait(struct amdgpu_device
*adev
, struct amdgpu_bo
*bo
)
1000 struct amdgpu_sync sync
;
1003 amdgpu_sync_create(&sync
);
1004 amdgpu_sync_resv(adev
, &sync
, bo
->tbo
.resv
, AMDGPU_FENCE_OWNER_VM
);
1005 r
= amdgpu_sync_wait(&sync
, true);
1006 amdgpu_sync_free(&sync
);
1012 * amdgpu_vm_update_level - update a single level in the hierarchy
1014 * @adev: amdgpu_device pointer
1016 * @parent: parent directory
1018 * Makes sure all entries in @parent are up to date.
1019 * Returns 0 for success, error for failure.
1021 static int amdgpu_vm_update_level(struct amdgpu_device
*adev
,
1022 struct amdgpu_vm
*vm
,
1023 struct amdgpu_vm_pt
*parent
,
1026 struct amdgpu_bo
*shadow
;
1027 struct amdgpu_ring
*ring
= NULL
;
1028 uint64_t pd_addr
, shadow_addr
= 0;
1029 uint32_t incr
= amdgpu_vm_bo_size(adev
, level
+ 1);
1030 uint64_t last_pde
= ~0, last_pt
= ~0, last_shadow
= ~0;
1031 unsigned count
= 0, pt_idx
, ndw
= 0;
1032 struct amdgpu_job
*job
;
1033 struct amdgpu_pte_update_params params
;
1034 struct dma_fence
*fence
= NULL
;
1038 if (!parent
->entries
)
1041 memset(¶ms
, 0, sizeof(params
));
1043 shadow
= parent
->bo
->shadow
;
1045 WARN_ON(vm
->use_cpu_for_update
&& shadow
);
1046 if (vm
->use_cpu_for_update
&& !shadow
) {
1047 r
= amdgpu_bo_kmap(parent
->bo
, (void **)&pd_addr
);
1050 r
= amdgpu_vm_bo_wait(adev
, parent
->bo
);
1052 amdgpu_bo_kunmap(parent
->bo
);
1055 params
.func
= amdgpu_vm_cpu_set_ptes
;
1058 r
= amdgpu_ttm_bind(&shadow
->tbo
, &shadow
->tbo
.mem
);
1062 ring
= container_of(vm
->entity
.sched
, struct amdgpu_ring
,
1068 /* assume the worst case */
1069 ndw
+= parent
->last_entry_used
* 6;
1071 pd_addr
= amdgpu_bo_gpu_offset(parent
->bo
);
1074 shadow_addr
= amdgpu_bo_gpu_offset(shadow
);
1080 r
= amdgpu_job_alloc_with_ib(adev
, ndw
* 4, &job
);
1084 params
.ib
= &job
->ibs
[0];
1085 params
.func
= amdgpu_vm_do_set_ptes
;
1089 /* walk over the address space and update the directory */
1090 for (pt_idx
= 0; pt_idx
<= parent
->last_entry_used
; ++pt_idx
) {
1091 struct amdgpu_bo
*bo
= parent
->entries
[pt_idx
].bo
;
1098 struct amdgpu_bo
*pt_shadow
= bo
->shadow
;
1100 r
= amdgpu_ttm_bind(&pt_shadow
->tbo
,
1101 &pt_shadow
->tbo
.mem
);
1106 pt
= amdgpu_bo_gpu_offset(bo
);
1107 pt
= amdgpu_gart_get_vm_pde(adev
, pt
);
1108 if (parent
->entries
[pt_idx
].addr
== pt
)
1111 parent
->entries
[pt_idx
].addr
= pt
;
1113 pde
= pd_addr
+ pt_idx
* 8;
1114 if (((last_pde
+ 8 * count
) != pde
) ||
1115 ((last_pt
+ incr
* count
) != pt
) ||
1116 (count
== AMDGPU_VM_MAX_UPDATE_SIZE
)) {
1120 params
.func(¶ms
,
1126 params
.func(¶ms
, last_pde
,
1127 last_pt
, count
, incr
,
1133 last_shadow
= shadow_addr
+ pt_idx
* 8;
1141 if (vm
->root
.bo
->shadow
)
1142 params
.func(¶ms
, last_shadow
, last_pt
,
1143 count
, incr
, AMDGPU_PTE_VALID
);
1145 params
.func(¶ms
, last_pde
, last_pt
,
1146 count
, incr
, AMDGPU_PTE_VALID
);
1149 if (params
.func
== amdgpu_vm_cpu_set_ptes
)
1150 amdgpu_bo_kunmap(parent
->bo
);
1151 else if (params
.ib
->length_dw
== 0) {
1152 amdgpu_job_free(job
);
1154 amdgpu_ring_pad_ib(ring
, params
.ib
);
1155 amdgpu_sync_resv(adev
, &job
->sync
, parent
->bo
->tbo
.resv
,
1156 AMDGPU_FENCE_OWNER_VM
);
1158 amdgpu_sync_resv(adev
, &job
->sync
, shadow
->tbo
.resv
,
1159 AMDGPU_FENCE_OWNER_VM
);
1161 WARN_ON(params
.ib
->length_dw
> ndw
);
1162 r
= amdgpu_job_submit(job
, ring
, &vm
->entity
,
1163 AMDGPU_FENCE_OWNER_VM
, &fence
);
1167 amdgpu_bo_fence(parent
->bo
, fence
, true);
1168 dma_fence_put(vm
->last_dir_update
);
1169 vm
->last_dir_update
= dma_fence_get(fence
);
1170 dma_fence_put(fence
);
1173 * Recurse into the subdirectories. This recursion is harmless because
1174 * we only have a maximum of 5 layers.
1176 for (pt_idx
= 0; pt_idx
<= parent
->last_entry_used
; ++pt_idx
) {
1177 struct amdgpu_vm_pt
*entry
= &parent
->entries
[pt_idx
];
1182 r
= amdgpu_vm_update_level(adev
, vm
, entry
, level
+ 1);
1190 amdgpu_job_free(job
);
1195 * amdgpu_vm_invalidate_level - mark all PD levels as invalid
1197 * @parent: parent PD
 * Mark all PD levels as invalid after an error.
1201 static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt
*parent
)
1206 * Recurse into the subdirectories. This recursion is harmless because
1207 * we only have a maximum of 5 layers.
1209 for (pt_idx
= 0; pt_idx
<= parent
->last_entry_used
; ++pt_idx
) {
1210 struct amdgpu_vm_pt
*entry
= &parent
->entries
[pt_idx
];
1215 entry
->addr
= ~0ULL;
1216 amdgpu_vm_invalidate_level(entry
);
1221 * amdgpu_vm_update_directories - make sure that all directories are valid
1223 * @adev: amdgpu_device pointer
1226 * Makes sure all directories are up to date.
1227 * Returns 0 for success, error for failure.
1229 int amdgpu_vm_update_directories(struct amdgpu_device
*adev
,
1230 struct amdgpu_vm
*vm
)
1234 r
= amdgpu_vm_update_level(adev
, vm
, &vm
->root
, 0);
1236 amdgpu_vm_invalidate_level(&vm
->root
);
1242 * amdgpu_vm_find_pt - find the page table for an address
1244 * @p: see amdgpu_pte_update_params definition
1245 * @addr: virtual address in question
1247 * Find the page table BO for a virtual address, return NULL when none found.
1249 static struct amdgpu_bo
*amdgpu_vm_get_pt(struct amdgpu_pte_update_params
*p
,
1252 struct amdgpu_vm_pt
*entry
= &p
->vm
->root
;
1253 unsigned idx
, level
= p
->adev
->vm_manager
.num_level
;
1255 while (entry
->entries
) {
1256 idx
= addr
>> (p
->adev
->vm_manager
.block_size
* level
--);
1257 idx
%= amdgpu_bo_size(entry
->bo
) / 8;
1258 entry
= &entry
->entries
[idx
];
1268 * amdgpu_vm_update_ptes - make sure that page tables are valid
1270 * @params: see amdgpu_pte_update_params definition
1272 * @start: start of GPU address range
1273 * @end: end of GPU address range
1274 * @dst: destination address to map to, the next dst inside the function
1275 * @flags: mapping flags
1277 * Update the page tables in the range @start - @end.
1278 * Returns 0 for success, -EINVAL for failure.
1280 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params
*params
,
1281 uint64_t start
, uint64_t end
,
1282 uint64_t dst
, uint64_t flags
)
1284 struct amdgpu_device
*adev
= params
->adev
;
1285 const uint64_t mask
= AMDGPU_VM_PTE_COUNT(adev
) - 1;
1287 uint64_t addr
, pe_start
;
1288 struct amdgpu_bo
*pt
;
1291 bool use_cpu_update
= (params
->func
== amdgpu_vm_cpu_set_ptes
);
1294 /* walk over the address space and update the page tables */
1295 for (addr
= start
; addr
< end
; addr
+= nptes
) {
1296 pt
= amdgpu_vm_get_pt(params
, addr
);
1298 pr_err("PT not found, aborting update_ptes\n");
1302 if (params
->shadow
) {
		if (WARN_ONCE(use_cpu_update,
			      "CPU VM update doesn't support shadow pages"))
1312 if ((addr
& ~mask
) == (end
& ~mask
))
1315 nptes
= AMDGPU_VM_PTE_COUNT(adev
) - (addr
& mask
);
1317 if (use_cpu_update
) {
1318 r
= amdgpu_bo_kmap(pt
, (void *)&pe_start
);
1322 pe_start
= amdgpu_bo_gpu_offset(pt
);
1324 pe_start
+= (addr
& mask
) * 8;
1326 params
->func(params
, pe_start
, dst
, nptes
,
1327 AMDGPU_GPU_PAGE_SIZE
, flags
);
1329 dst
+= nptes
* AMDGPU_GPU_PAGE_SIZE
;
1332 amdgpu_bo_kunmap(pt
);
1339 * amdgpu_vm_frag_ptes - add fragment information to PTEs
1341 * @params: see amdgpu_pte_update_params definition
1343 * @start: first PTE to handle
1344 * @end: last PTE to handle
1345 * @dst: addr those PTEs should point to
1346 * @flags: hw mapping flags
1347 * Returns 0 for success, -EINVAL for failure.
1349 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params
*params
,
1350 uint64_t start
, uint64_t end
,
1351 uint64_t dst
, uint64_t flags
)
1356 * The MC L1 TLB supports variable sized pages, based on a fragment
1357 * field in the PTE. When this field is set to a non-zero value, page
1358 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1359 * flags are considered valid for all PTEs within the fragment range
1360 * and corresponding mappings are assumed to be physically contiguous.
1362 * The L1 TLB can store a single PTE for the whole fragment,
1363 * significantly increasing the space available for translation
1364 * caching. This leads to large improvements in throughput when the
1365 * TLB is under pressure.
1367 * The L2 TLB distributes small and large fragments into two
1368 * asymmetric partitions. The large fragment cache is significantly
1369 * larger. Thus, we try to use large fragments wherever possible.
1370 * Userspace can support this by aligning virtual base address and
1371 * allocation size to the fragment size.
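 *
 * Worked example (illustrative only): with AMDGPU_LOG2_PAGES_PER_FRAG = 4
 * the fragment covers 16 GPU pages (64KB). For an update of pages 10..70
 * the code below writes pages 10..15 as plain 4KB PTEs, pages 16..63 as
 * PTEs carrying the fragment flag, and pages 64..70 as plain PTEs again.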
1374 /* SI and newer are optimized for 64KB */
1375 uint64_t frag_flags
= AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG
);
1376 uint64_t frag_align
= 1 << AMDGPU_LOG2_PAGES_PER_FRAG
;
1378 uint64_t frag_start
= ALIGN(start
, frag_align
);
1379 uint64_t frag_end
= end
& ~(frag_align
- 1);
	/* system pages are not contiguous */
1382 if (params
->src
|| !(flags
& AMDGPU_PTE_VALID
) ||
1383 (frag_start
>= frag_end
))
1384 return amdgpu_vm_update_ptes(params
, start
, end
, dst
, flags
);
1386 /* handle the 4K area at the beginning */
1387 if (start
!= frag_start
) {
1388 r
= amdgpu_vm_update_ptes(params
, start
, frag_start
,
1392 dst
+= (frag_start
- start
) * AMDGPU_GPU_PAGE_SIZE
;
1395 /* handle the area in the middle */
1396 r
= amdgpu_vm_update_ptes(params
, frag_start
, frag_end
, dst
,
1397 flags
| frag_flags
);
1401 /* handle the 4K area at the end */
1402 if (frag_end
!= end
) {
1403 dst
+= (frag_end
- frag_start
) * AMDGPU_GPU_PAGE_SIZE
;
1404 r
= amdgpu_vm_update_ptes(params
, frag_end
, end
, dst
, flags
);
1410 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1412 * @adev: amdgpu_device pointer
1413 * @exclusive: fence we need to sync to
1414 * @src: address where to copy page table entries from
1415 * @pages_addr: DMA addresses to use for mapping
1417 * @start: start of mapped range
1418 * @last: last mapped entry
1419 * @flags: flags for the entries
1420 * @addr: addr to set the area to
1421 * @fence: optional resulting fence
1423 * Fill in the page table entries between @start and @last.
1424 * Returns 0 for success, -EINVAL for failure.
1426 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device
*adev
,
1427 struct dma_fence
*exclusive
,
1429 dma_addr_t
*pages_addr
,
1430 struct amdgpu_vm
*vm
,
1431 uint64_t start
, uint64_t last
,
1432 uint64_t flags
, uint64_t addr
,
1433 struct dma_fence
**fence
)
1435 struct amdgpu_ring
*ring
;
1436 void *owner
= AMDGPU_FENCE_OWNER_VM
;
1437 unsigned nptes
, ncmds
, ndw
;
1438 struct amdgpu_job
*job
;
1439 struct amdgpu_pte_update_params params
;
1440 struct dma_fence
*f
= NULL
;
1443 memset(¶ms
, 0, sizeof(params
));
1448 if (vm
->use_cpu_for_update
) {
1449 /* params.src is used as flag to indicate system Memory */
1453 /* Wait for PT BOs to be free. PTs share the same resv. object
1456 r
= amdgpu_vm_bo_wait(adev
, vm
->root
.bo
);
1460 params
.func
= amdgpu_vm_cpu_set_ptes
;
1461 params
.pages_addr
= pages_addr
;
1462 params
.shadow
= false;
1463 return amdgpu_vm_frag_ptes(¶ms
, start
, last
+ 1,
1467 ring
= container_of(vm
->entity
.sched
, struct amdgpu_ring
, sched
);
1469 /* sync to everything on unmapping */
1470 if (!(flags
& AMDGPU_PTE_VALID
))
1471 owner
= AMDGPU_FENCE_OWNER_UNDEFINED
;
1473 nptes
= last
- start
+ 1;
1476 * reserve space for one command every (1 << BLOCK_SIZE)
1477 * entries or 2k dwords (whatever is smaller)
1479 ncmds
= (nptes
>> min(adev
->vm_manager
.block_size
, 11u)) + 1;
1485 /* only copy commands needed */
1488 params
.func
= amdgpu_vm_do_copy_ptes
;
1490 } else if (pages_addr
) {
1491 /* copy commands needed */
1497 params
.func
= amdgpu_vm_do_copy_ptes
;
1500 /* set page commands needed */
1503 /* two extra commands for begin/end of fragment */
1506 params
.func
= amdgpu_vm_do_set_ptes
;
1509 r
= amdgpu_job_alloc_with_ib(adev
, ndw
* 4, &job
);
1513 params
.ib
= &job
->ibs
[0];
1515 if (!src
&& pages_addr
) {
1519 /* Put the PTEs at the end of the IB. */
1520 i
= ndw
- nptes
* 2;
1521 pte
= (uint64_t *)&(job
->ibs
->ptr
[i
]);
1522 params
.src
= job
->ibs
->gpu_addr
+ i
* 4;
1524 for (i
= 0; i
< nptes
; ++i
) {
1525 pte
[i
] = amdgpu_vm_map_gart(pages_addr
, addr
+ i
*
1526 AMDGPU_GPU_PAGE_SIZE
);
1532 r
= amdgpu_sync_fence(adev
, &job
->sync
, exclusive
);
1536 r
= amdgpu_sync_resv(adev
, &job
->sync
, vm
->root
.bo
->tbo
.resv
,
1541 r
= reservation_object_reserve_shared(vm
->root
.bo
->tbo
.resv
);
1545 params
.shadow
= true;
1546 r
= amdgpu_vm_frag_ptes(¶ms
, start
, last
+ 1, addr
, flags
);
1549 params
.shadow
= false;
1550 r
= amdgpu_vm_frag_ptes(¶ms
, start
, last
+ 1, addr
, flags
);
1554 amdgpu_ring_pad_ib(ring
, params
.ib
);
1555 WARN_ON(params
.ib
->length_dw
> ndw
);
1556 r
= amdgpu_job_submit(job
, ring
, &vm
->entity
,
1557 AMDGPU_FENCE_OWNER_VM
, &f
);
1561 amdgpu_bo_fence(vm
->root
.bo
, f
, true);
1562 dma_fence_put(*fence
);
1567 amdgpu_job_free(job
);
1572 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1574 * @adev: amdgpu_device pointer
1575 * @exclusive: fence we need to sync to
1576 * @gtt_flags: flags as they are used for GTT
1577 * @pages_addr: DMA addresses to use for mapping
1579 * @mapping: mapped range and flags to use for the update
1580 * @flags: HW flags for the mapping
1581 * @nodes: array of drm_mm_nodes with the MC addresses
1582 * @fence: optional resulting fence
1584 * Split the mapping into smaller chunks so that each update fits
1586 * Returns 0 for success, -EINVAL for failure.
1588 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device
*adev
,
1589 struct dma_fence
*exclusive
,
1591 dma_addr_t
*pages_addr
,
1592 struct amdgpu_vm
*vm
,
1593 struct amdgpu_bo_va_mapping
*mapping
,
1595 struct drm_mm_node
*nodes
,
1596 struct dma_fence
**fence
)
1598 uint64_t pfn
, src
= 0, start
= mapping
->start
;
	/* normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
	 * but just in case we filter the flags here first
	 */
1604 if (!(mapping
->flags
& AMDGPU_PTE_READABLE
))
1605 flags
&= ~AMDGPU_PTE_READABLE
;
1606 if (!(mapping
->flags
& AMDGPU_PTE_WRITEABLE
))
1607 flags
&= ~AMDGPU_PTE_WRITEABLE
;
1609 flags
&= ~AMDGPU_PTE_EXECUTABLE
;
1610 flags
|= mapping
->flags
& AMDGPU_PTE_EXECUTABLE
;
1612 flags
&= ~AMDGPU_PTE_MTYPE_MASK
;
1613 flags
|= (mapping
->flags
& AMDGPU_PTE_MTYPE_MASK
);
1615 if ((mapping
->flags
& AMDGPU_PTE_PRT
) &&
1616 (adev
->asic_type
>= CHIP_VEGA10
)) {
1617 flags
|= AMDGPU_PTE_PRT
;
1618 flags
&= ~AMDGPU_PTE_VALID
;
1621 trace_amdgpu_vm_bo_update(mapping
);
1623 pfn
= mapping
->offset
>> PAGE_SHIFT
;
1625 while (pfn
>= nodes
->size
) {
1632 uint64_t max_entries
;
1633 uint64_t addr
, last
;
1636 addr
= nodes
->start
<< PAGE_SHIFT
;
1637 max_entries
= (nodes
->size
- pfn
) *
1638 (PAGE_SIZE
/ AMDGPU_GPU_PAGE_SIZE
);
1641 max_entries
= S64_MAX
;
1645 if (flags
== gtt_flags
)
1646 src
= adev
->gart
.table_addr
+
1647 (addr
>> AMDGPU_GPU_PAGE_SHIFT
) * 8;
1649 max_entries
= min(max_entries
, 16ull * 1024ull);
1651 } else if (flags
& AMDGPU_PTE_VALID
) {
1652 addr
+= adev
->vm_manager
.vram_base_offset
;
1654 addr
+= pfn
<< PAGE_SHIFT
;
1656 last
= min((uint64_t)mapping
->last
, start
+ max_entries
- 1);
1657 r
= amdgpu_vm_bo_update_mapping(adev
, exclusive
,
1658 src
, pages_addr
, vm
,
1659 start
, last
, flags
, addr
,
1664 pfn
+= last
- start
+ 1;
1665 if (nodes
&& nodes
->size
== pfn
) {
1671 } while (unlikely(start
!= mapping
->last
+ 1));
1677 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1679 * @adev: amdgpu_device pointer
1680 * @bo_va: requested BO and VM object
1681 * @clear: if true clear the entries
1683 * Fill in the page table entries for @bo_va.
1684 * Returns 0 for success, -EINVAL for failure.
1686 int amdgpu_vm_bo_update(struct amdgpu_device
*adev
,
1687 struct amdgpu_bo_va
*bo_va
,
1690 struct amdgpu_vm
*vm
= bo_va
->vm
;
1691 struct amdgpu_bo_va_mapping
*mapping
;
1692 dma_addr_t
*pages_addr
= NULL
;
1693 uint64_t gtt_flags
, flags
;
1694 struct ttm_mem_reg
*mem
;
1695 struct drm_mm_node
*nodes
;
1696 struct dma_fence
*exclusive
;
1699 if (clear
|| !bo_va
->bo
) {
1704 struct ttm_dma_tt
*ttm
;
1706 mem
= &bo_va
->bo
->tbo
.mem
;
1707 nodes
= mem
->mm_node
;
1708 if (mem
->mem_type
== TTM_PL_TT
) {
1709 ttm
= container_of(bo_va
->bo
->tbo
.ttm
, struct
1711 pages_addr
= ttm
->dma_address
;
1713 exclusive
= reservation_object_get_excl(bo_va
->bo
->tbo
.resv
);
1717 flags
= amdgpu_ttm_tt_pte_flags(adev
, bo_va
->bo
->tbo
.ttm
, mem
);
1718 gtt_flags
= (amdgpu_ttm_is_bound(bo_va
->bo
->tbo
.ttm
) &&
1719 adev
== amdgpu_ttm_adev(bo_va
->bo
->tbo
.bdev
)) ?
1726 spin_lock(&vm
->status_lock
);
1727 if (!list_empty(&bo_va
->vm_status
))
1728 list_splice_init(&bo_va
->valids
, &bo_va
->invalids
);
1729 spin_unlock(&vm
->status_lock
);
1731 list_for_each_entry(mapping
, &bo_va
->invalids
, list
) {
1732 r
= amdgpu_vm_bo_split_mapping(adev
, exclusive
,
1733 gtt_flags
, pages_addr
, vm
,
1734 mapping
, flags
, nodes
,
1735 &bo_va
->last_pt_update
);
1740 if (trace_amdgpu_vm_bo_mapping_enabled()) {
1741 list_for_each_entry(mapping
, &bo_va
->valids
, list
)
1742 trace_amdgpu_vm_bo_mapping(mapping
);
1744 list_for_each_entry(mapping
, &bo_va
->invalids
, list
)
1745 trace_amdgpu_vm_bo_mapping(mapping
);
1748 spin_lock(&vm
->status_lock
);
1749 list_splice_init(&bo_va
->invalids
, &bo_va
->valids
);
1750 list_del_init(&bo_va
->vm_status
);
1752 list_add(&bo_va
->vm_status
, &vm
->cleared
);
1753 spin_unlock(&vm
->status_lock
);
1759 * amdgpu_vm_update_prt_state - update the global PRT state
1761 static void amdgpu_vm_update_prt_state(struct amdgpu_device
*adev
)
1763 unsigned long flags
;
1766 spin_lock_irqsave(&adev
->vm_manager
.prt_lock
, flags
);
1767 enable
= !!atomic_read(&adev
->vm_manager
.num_prt_users
);
1768 adev
->gart
.gart_funcs
->set_prt(adev
, enable
);
1769 spin_unlock_irqrestore(&adev
->vm_manager
.prt_lock
, flags
);
1773 * amdgpu_vm_prt_get - add a PRT user
1775 static void amdgpu_vm_prt_get(struct amdgpu_device
*adev
)
1777 if (!adev
->gart
.gart_funcs
->set_prt
)
1780 if (atomic_inc_return(&adev
->vm_manager
.num_prt_users
) == 1)
1781 amdgpu_vm_update_prt_state(adev
);
1785 * amdgpu_vm_prt_put - drop a PRT user
1787 static void amdgpu_vm_prt_put(struct amdgpu_device
*adev
)
1789 if (atomic_dec_return(&adev
->vm_manager
.num_prt_users
) == 0)
1790 amdgpu_vm_update_prt_state(adev
);
1794 * amdgpu_vm_prt_cb - callback for updating the PRT status
1796 static void amdgpu_vm_prt_cb(struct dma_fence
*fence
, struct dma_fence_cb
*_cb
)
1798 struct amdgpu_prt_cb
*cb
= container_of(_cb
, struct amdgpu_prt_cb
, cb
);
1800 amdgpu_vm_prt_put(cb
->adev
);
1805 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1807 static void amdgpu_vm_add_prt_cb(struct amdgpu_device
*adev
,
1808 struct dma_fence
*fence
)
1810 struct amdgpu_prt_cb
*cb
;
1812 if (!adev
->gart
.gart_funcs
->set_prt
)
1815 cb
= kmalloc(sizeof(struct amdgpu_prt_cb
), GFP_KERNEL
);
1817 /* Last resort when we are OOM */
1819 dma_fence_wait(fence
, false);
1821 amdgpu_vm_prt_put(adev
);
1824 if (!fence
|| dma_fence_add_callback(fence
, &cb
->cb
,
1826 amdgpu_vm_prt_cb(fence
, &cb
->cb
);
1831 * amdgpu_vm_free_mapping - free a mapping
1833 * @adev: amdgpu_device pointer
1835 * @mapping: mapping to be freed
1836 * @fence: fence of the unmap operation
1838 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1840 static void amdgpu_vm_free_mapping(struct amdgpu_device
*adev
,
1841 struct amdgpu_vm
*vm
,
1842 struct amdgpu_bo_va_mapping
*mapping
,
1843 struct dma_fence
*fence
)
1845 if (mapping
->flags
& AMDGPU_PTE_PRT
)
1846 amdgpu_vm_add_prt_cb(adev
, fence
);
1851 * amdgpu_vm_prt_fini - finish all prt mappings
1853 * @adev: amdgpu_device pointer
1856 * Register a cleanup callback to disable PRT support after VM dies.
1858 static void amdgpu_vm_prt_fini(struct amdgpu_device
*adev
, struct amdgpu_vm
*vm
)
1860 struct reservation_object
*resv
= vm
->root
.bo
->tbo
.resv
;
1861 struct dma_fence
*excl
, **shared
;
1862 unsigned i
, shared_count
;
1865 r
= reservation_object_get_fences_rcu(resv
, &excl
,
1866 &shared_count
, &shared
);
1868 /* Not enough memory to grab the fence list, as last resort
1869 * block for all the fences to complete.
1871 reservation_object_wait_timeout_rcu(resv
, true, false,
1872 MAX_SCHEDULE_TIMEOUT
);
1876 /* Add a callback for each fence in the reservation object */
1877 amdgpu_vm_prt_get(adev
);
1878 amdgpu_vm_add_prt_cb(adev
, excl
);
1880 for (i
= 0; i
< shared_count
; ++i
) {
1881 amdgpu_vm_prt_get(adev
);
1882 amdgpu_vm_add_prt_cb(adev
, shared
[i
]);
1889 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1891 * @adev: amdgpu_device pointer
1893 * @fence: optional resulting fence (unchanged if no work needed to be done
1894 * or if an error occurred)
1896 * Make sure all freed BOs are cleared in the PT.
1897 * Returns 0 for success.
1899 * PTs have to be reserved and mutex must be locked!
1901 int amdgpu_vm_clear_freed(struct amdgpu_device
*adev
,
1902 struct amdgpu_vm
*vm
,
1903 struct dma_fence
**fence
)
1905 struct amdgpu_bo_va_mapping
*mapping
;
1906 struct dma_fence
*f
= NULL
;
1909 while (!list_empty(&vm
->freed
)) {
1910 mapping
= list_first_entry(&vm
->freed
,
1911 struct amdgpu_bo_va_mapping
, list
);
1912 list_del(&mapping
->list
);
1914 r
= amdgpu_vm_bo_update_mapping(adev
, NULL
, 0, NULL
, vm
,
1915 mapping
->start
, mapping
->last
,
1917 amdgpu_vm_free_mapping(adev
, vm
, mapping
, f
);
1925 dma_fence_put(*fence
);
1936 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
1938 * @adev: amdgpu_device pointer
1941 * Make sure all invalidated BOs are cleared in the PT.
1942 * Returns 0 for success.
1944 * PTs have to be reserved and mutex must be locked!
1946 int amdgpu_vm_clear_invalids(struct amdgpu_device
*adev
,
1947 struct amdgpu_vm
*vm
, struct amdgpu_sync
*sync
)
1949 struct amdgpu_bo_va
*bo_va
= NULL
;
1952 spin_lock(&vm
->status_lock
);
1953 while (!list_empty(&vm
->invalidated
)) {
1954 bo_va
= list_first_entry(&vm
->invalidated
,
1955 struct amdgpu_bo_va
, vm_status
);
1956 spin_unlock(&vm
->status_lock
);
1958 r
= amdgpu_vm_bo_update(adev
, bo_va
, true);
1962 spin_lock(&vm
->status_lock
);
1964 spin_unlock(&vm
->status_lock
);
1967 r
= amdgpu_sync_fence(adev
, sync
, bo_va
->last_pt_update
);
1973 * amdgpu_vm_bo_add - add a bo to a specific vm
1975 * @adev: amdgpu_device pointer
1977 * @bo: amdgpu buffer object
1979 * Add @bo into the requested vm.
1980 * Add @bo to the list of bos associated with the vm
1981 * Returns newly added bo_va or NULL for failure
1983 * Object has to be reserved!
1985 struct amdgpu_bo_va
*amdgpu_vm_bo_add(struct amdgpu_device
*adev
,
1986 struct amdgpu_vm
*vm
,
1987 struct amdgpu_bo
*bo
)
1989 struct amdgpu_bo_va
*bo_va
;
1991 bo_va
= kzalloc(sizeof(struct amdgpu_bo_va
), GFP_KERNEL
);
1992 if (bo_va
== NULL
) {
1997 bo_va
->ref_count
= 1;
1998 INIT_LIST_HEAD(&bo_va
->bo_list
);
1999 INIT_LIST_HEAD(&bo_va
->valids
);
2000 INIT_LIST_HEAD(&bo_va
->invalids
);
2001 INIT_LIST_HEAD(&bo_va
->vm_status
);
2004 list_add_tail(&bo_va
->bo_list
, &bo
->va
);
2010 * amdgpu_vm_bo_map - map bo inside a vm
2012 * @adev: amdgpu_device pointer
2013 * @bo_va: bo_va to store the address
2014 * @saddr: where to map the BO
2015 * @offset: requested offset in the BO
2016 * @flags: attributes of pages (read/write/valid/etc.)
 * Add a mapping of the BO at the specified addr into the VM.
2019 * Returns 0 for success, error for failure.
2021 * Object has to be reserved and unreserved outside!
2023 int amdgpu_vm_bo_map(struct amdgpu_device
*adev
,
2024 struct amdgpu_bo_va
*bo_va
,
2025 uint64_t saddr
, uint64_t offset
,
2026 uint64_t size
, uint64_t flags
)
2028 struct amdgpu_bo_va_mapping
*mapping
, *tmp
;
2029 struct amdgpu_vm
*vm
= bo_va
->vm
;
2032 /* validate the parameters */
2033 if (saddr
& AMDGPU_GPU_PAGE_MASK
|| offset
& AMDGPU_GPU_PAGE_MASK
||
2034 size
== 0 || size
& AMDGPU_GPU_PAGE_MASK
)
2037 /* make sure object fit at this offset */
2038 eaddr
= saddr
+ size
- 1;
2039 if (saddr
>= eaddr
||
2040 (bo_va
->bo
&& offset
+ size
> amdgpu_bo_size(bo_va
->bo
)))
2043 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
2044 eaddr
/= AMDGPU_GPU_PAGE_SIZE
;
2046 tmp
= amdgpu_vm_it_iter_first(&vm
->va
, saddr
, eaddr
);
2048 /* bo and tmp overlap, invalid addr */
2049 dev_err(adev
->dev
, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2050 "0x%010Lx-0x%010Lx\n", bo_va
->bo
, saddr
, eaddr
,
2051 tmp
->start
, tmp
->last
+ 1);
2055 mapping
= kmalloc(sizeof(*mapping
), GFP_KERNEL
);
2059 INIT_LIST_HEAD(&mapping
->list
);
2060 mapping
->start
= saddr
;
2061 mapping
->last
= eaddr
;
2062 mapping
->offset
= offset
;
2063 mapping
->flags
= flags
;
2065 list_add(&mapping
->list
, &bo_va
->invalids
);
2066 amdgpu_vm_it_insert(mapping
, &vm
->va
);
2068 if (flags
& AMDGPU_PTE_PRT
)
2069 amdgpu_vm_prt_get(adev
);
2075 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2077 * @adev: amdgpu_device pointer
2078 * @bo_va: bo_va to store the address
2079 * @saddr: where to map the BO
2080 * @offset: requested offset in the BO
2081 * @flags: attributes of pages (read/write/valid/etc.)
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2084 * mappings as we do so.
2085 * Returns 0 for success, error for failure.
2087 * Object has to be reserved and unreserved outside!
2089 int amdgpu_vm_bo_replace_map(struct amdgpu_device
*adev
,
2090 struct amdgpu_bo_va
*bo_va
,
2091 uint64_t saddr
, uint64_t offset
,
2092 uint64_t size
, uint64_t flags
)
2094 struct amdgpu_bo_va_mapping
*mapping
;
2095 struct amdgpu_vm
*vm
= bo_va
->vm
;
2099 /* validate the parameters */
2100 if (saddr
& AMDGPU_GPU_PAGE_MASK
|| offset
& AMDGPU_GPU_PAGE_MASK
||
2101 size
== 0 || size
& AMDGPU_GPU_PAGE_MASK
)
2104 /* make sure object fit at this offset */
2105 eaddr
= saddr
+ size
- 1;
2106 if (saddr
>= eaddr
||
2107 (bo_va
->bo
&& offset
+ size
> amdgpu_bo_size(bo_va
->bo
)))
2110 /* Allocate all the needed memory */
2111 mapping
= kmalloc(sizeof(*mapping
), GFP_KERNEL
);
2115 r
= amdgpu_vm_bo_clear_mappings(adev
, bo_va
->vm
, saddr
, size
);
2121 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
2122 eaddr
/= AMDGPU_GPU_PAGE_SIZE
;
2124 mapping
->start
= saddr
;
2125 mapping
->last
= eaddr
;
2126 mapping
->offset
= offset
;
2127 mapping
->flags
= flags
;
2129 list_add(&mapping
->list
, &bo_va
->invalids
);
2130 amdgpu_vm_it_insert(mapping
, &vm
->va
);
2132 if (flags
& AMDGPU_PTE_PRT
)
2133 amdgpu_vm_prt_get(adev
);
2139 * amdgpu_vm_bo_unmap - remove bo mapping from vm
2141 * @adev: amdgpu_device pointer
2142 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
2146 * Returns 0 for success, error for failure.
2148 * Object has to be reserved and unreserved outside!
2150 int amdgpu_vm_bo_unmap(struct amdgpu_device
*adev
,
2151 struct amdgpu_bo_va
*bo_va
,
2154 struct amdgpu_bo_va_mapping
*mapping
;
2155 struct amdgpu_vm
*vm
= bo_va
->vm
;
2158 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
2160 list_for_each_entry(mapping
, &bo_va
->valids
, list
) {
2161 if (mapping
->start
== saddr
)
2165 if (&mapping
->list
== &bo_va
->valids
) {
2168 list_for_each_entry(mapping
, &bo_va
->invalids
, list
) {
2169 if (mapping
->start
== saddr
)
2173 if (&mapping
->list
== &bo_va
->invalids
)
2177 list_del(&mapping
->list
);
2178 amdgpu_vm_it_remove(mapping
, &vm
->va
);
2179 trace_amdgpu_vm_bo_unmap(bo_va
, mapping
);
2182 list_add(&mapping
->list
, &vm
->freed
);
2184 amdgpu_vm_free_mapping(adev
, vm
, mapping
,
2185 bo_va
->last_pt_update
);
2191 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2193 * @adev: amdgpu_device pointer
2194 * @vm: VM structure to use
2195 * @saddr: start of the range
2196 * @size: size of the range
2198 * Remove all mappings in a range, split them as appropriate.
2199 * Returns 0 for success, error for failure.
2201 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device
*adev
,
2202 struct amdgpu_vm
*vm
,
2203 uint64_t saddr
, uint64_t size
)
2205 struct amdgpu_bo_va_mapping
*before
, *after
, *tmp
, *next
;
2209 eaddr
= saddr
+ size
- 1;
2210 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
2211 eaddr
/= AMDGPU_GPU_PAGE_SIZE
;
2213 /* Allocate all the needed memory */
2214 before
= kzalloc(sizeof(*before
), GFP_KERNEL
);
2217 INIT_LIST_HEAD(&before
->list
);
2219 after
= kzalloc(sizeof(*after
), GFP_KERNEL
);
2224 INIT_LIST_HEAD(&after
->list
);
2226 /* Now gather all removed mappings */
2227 tmp
= amdgpu_vm_it_iter_first(&vm
->va
, saddr
, eaddr
);
2229 /* Remember mapping split at the start */
2230 if (tmp
->start
< saddr
) {
2231 before
->start
= tmp
->start
;
2232 before
->last
= saddr
- 1;
2233 before
->offset
= tmp
->offset
;
2234 before
->flags
= tmp
->flags
;
2235 list_add(&before
->list
, &tmp
->list
);
2238 /* Remember mapping split at the end */
2239 if (tmp
->last
> eaddr
) {
2240 after
->start
= eaddr
+ 1;
2241 after
->last
= tmp
->last
;
2242 after
->offset
= tmp
->offset
;
2243 after
->offset
+= after
->start
- tmp
->start
;
2244 after
->flags
= tmp
->flags
;
2245 list_add(&after
->list
, &tmp
->list
);
2248 list_del(&tmp
->list
);
2249 list_add(&tmp
->list
, &removed
);
2251 tmp
= amdgpu_vm_it_iter_next(tmp
, saddr
, eaddr
);
2254 /* And free them up */
2255 list_for_each_entry_safe(tmp
, next
, &removed
, list
) {
2256 amdgpu_vm_it_remove(tmp
, &vm
->va
);
2257 list_del(&tmp
->list
);
2259 if (tmp
->start
< saddr
)
2261 if (tmp
->last
> eaddr
)
2264 list_add(&tmp
->list
, &vm
->freed
);
2265 trace_amdgpu_vm_bo_unmap(NULL
, tmp
);
2268 /* Insert partial mapping before the range */
2269 if (!list_empty(&before
->list
)) {
2270 amdgpu_vm_it_insert(before
, &vm
->va
);
2271 if (before
->flags
& AMDGPU_PTE_PRT
)
2272 amdgpu_vm_prt_get(adev
);
2277 /* Insert partial mapping after the range */
2278 if (!list_empty(&after
->list
)) {
2279 amdgpu_vm_it_insert(after
, &vm
->va
);
2280 if (after
->flags
& AMDGPU_PTE_PRT
)
2281 amdgpu_vm_prt_get(adev
);
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2292 * @adev: amdgpu_device pointer
2293 * @bo_va: requested bo_va
2295 * Remove @bo_va->bo from the requested vm.
 * Object has to be reserved!
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}
/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	 * Above that split equal between PD and PTs.
	 */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}
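/*
 * Worked example (illustrative, not part of the original source): for a
 * 64 GB VM, ilog2(64) + 18 = 24 bits are translated by PD + PTs together,
 * so the block size becomes (24 + 3) / 2 = 13, i.e. each page table maps
 * 2^13 GPU pages and the directory covers the remaining 11 bits.
 */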
/**
 * amdgpu_vm_adjust_size - adjust vm size and block size
 *
 * @adev: amdgpu_device pointer
 * @vm_size: the default vm size if it's set auto
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
{
	/* adjust vm size first */
	if (amdgpu_vm_size == -1)
		adev->vm_manager.vm_size = vm_size;
	else
		adev->vm_manager.vm_size = amdgpu_vm_size;

	/* block size depends on vm size */
	if (amdgpu_vm_block_size == -1)
		adev->vm_manager.block_size =
			amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
	else
		adev->vm_manager.block_size = amdgpu_vm_block_size;

	DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
		 adev->vm_manager.vm_size, adev->vm_manager.block_size);
}
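/*
 * Example (illustrative): passing -1 (auto) for the amdgpu.vm_size and
 * amdgpu.vm_block_size module parameters keeps the driver-chosen VM size and
 * derives the block size via amdgpu_vm_get_block_size(); setting e.g.
 * amdgpu.vm_size=256 forces a 256 GB per-VM address space instead.
 */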
/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
				   AMDGPU_VM_PTE_COUNT(adev) * 8);
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int r, i;
	u64 flags;

	vm->va = RB_ROOT;
	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);
	/* create scheduler entity for page table updates */
	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs);
	if (r)
		return r;
	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
	else
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_GFX);
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
		  "CPU update of VM recommended only for large BAR system\n");
	vm->last_dir_update = NULL;
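	/*
	 * Background note (added for clarity): when the CPU writes the page
	 * tables directly instead of going through the SDMA, the root PD
	 * allocated below must stay CPU-visible, which is why
	 * AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED is requested in that case and
	 * why this mode is only recommended on large BAR systems.
	 */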
	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_VRAM_CLEARED;
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else
		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			  AMDGPU_GEM_CREATE_SHADOW);

	r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
			     AMDGPU_GEM_DOMAIN_VRAM, flags,
			     NULL, NULL, &vm->root.bo);
	if (r)
		goto error_free_sched_entity;
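	/*
	 * Note (added for clarity): AMDGPU_GEM_CREATE_SHADOW asks for a
	 * system-memory shadow copy of the page directory, which the SDMA
	 * update path uses to restore page tables after a GPU reset; the CPU
	 * update path skips it and instead requires CPU-visible VRAM.
	 */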
	r = amdgpu_bo_reserve(vm->root.bo, false);
	if (r)
		goto error_free_root;

	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
	amdgpu_bo_unreserve(vm->root.bo);

	return 0;

error_free_root:
	amdgpu_bo_unref(&vm->root.bo->shadow);
	amdgpu_bo_unref(&vm->root.bo);

error_free_sched_entity:
	amd_sched_entity_fini(&ring->sched, &vm->entity);

	return r;
}
/**
 * amdgpu_vm_free_levels - free PD/PT levels
 *
 * @level: PD/PT starting level to free
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
{
	unsigned i;

	if (level->bo) {
		amdgpu_bo_unref(&level->bo->shadow);
		amdgpu_bo_unref(&level->bo);
	}

	if (level->entries)
		for (i = 0; i <= level->last_entry_used; i++)
			amdgpu_vm_free_levels(&level->entries[i]);

	kvfree(level->entries);
}
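/*
 * Note (added for clarity): the multi-level page table is a tree of
 * amdgpu_vm_pt nodes, so the teardown above is a simple recursion: release
 * this level's BO (and its shadow, if any), recurse into every entry that
 * was ever used, then free the entries array itself.
 */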
/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
	int i;

	amd_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	amdgpu_vm_free_levels(&vm->root);
	dma_fence_put(vm->last_dir_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vm_free_reserved_vmid(adev, vm, i);
}
/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vm_reset_id(adev, i, j);
			/* per-ID state, so index with j rather than the hub index */
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	atomic64_set(&adev->vm_manager.client_counter, 0);
	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);
	/* If not overridden by the user, CPU updates of compute VM page
	 * tables are enabled by default only on large BAR systems.
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_vm_is_large_bar(adev))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif
}
/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VM; ++j) {
			struct amdgpu_vm_id *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
		}
	}
}
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently we only need to reserve VMIDs from the gfxhub */
		r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
						  AMDGPU_GFXHUB);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);