2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
31 #include <drm/amdgpu_drm.h>
33 #include "amdgpu_trace.h"
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
51 * Cayman/Trinity support up to 8 active VMs at any given time;
55 #define START(node) ((node)->start)
56 #define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)
/* Local structure. Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
	/* amdgpu device we do this update for */
	struct amdgpu_device *adev;
	/* optional amdgpu_vm we do this update for */
	struct amdgpu_vm *vm;
	/* address where to copy page table entries from */
	uint64_t src;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
	/* Function which actually does the update */
	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
	/* The next two are used during VM update by CPU
	 *  DMA addresses to use for mapping
	 *  Kernel pointer of PD/PT BO that needs to be updated
	 */
	dma_addr_t *pages_addr;
	void *kptr;
};
/* Helper to disable partial resident texture feature from a fence callback */
struct amdgpu_prt_cb {
	struct amdgpu_device *adev;
	struct dma_fence_cb cb;
};
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	if (level == 0)
		/* For the root directory */
		return adev->vm_manager.max_pfn >>
			(adev->vm_manager.block_size *
			 adev->vm_manager.num_level);
	else if (level == adev->vm_manager.num_level)
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
	/* Everything in between */
	return 1 << adev->vm_manager.block_size;
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
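	/*
	 * Worked example (illustration only, not part of the driver): with a
	 * hypothetical block_size of 9, a non-root level holds 1 << 9 = 512
	 * entries, and since each entry is 8 bytes the backing BO for that
	 * level is 512 * 8 = 4096 bytes, i.e. exactly one GPU page after
	 * alignment.
	 */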
130 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
132 * @vm: vm providing the BOs
133 * @validated: head of validation list
134 * @entry: entry to add
136 * Add the page directory to the list of BOs to
137 * validate for command submission.
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->root.bo;
	entry->tv.bo = &entry->robj->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
 * amdgpu_vm_validate_level - validate a single page table level
 *
 * @parent: parent page table level
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
				    int (*validate)(void *, struct amdgpu_bo *),
				    void *param, bool use_cpu_for_update,
				    struct ttm_bo_global *glob)
	if (use_cpu_for_update) {
		r = amdgpu_bo_kmap(parent->bo, NULL);

	if (!parent->entries)

	for (i = 0; i <= parent->last_entry_used; ++i) {
		struct amdgpu_vm_pt *entry = &parent->entries[i];

		r = validate(param, entry->bo);

		spin_lock(&glob->lru_lock);
		ttm_bo_move_to_lru_tail(&entry->bo->tbo);
		if (entry->bo->shadow)
			ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo);
		spin_unlock(&glob->lru_lock);

		/*
		 * Recurse into the sub directory. This is harmless because we
		 * have only a maximum of 5 layers.
		 */
		r = amdgpu_vm_validate_level(entry, validate, param,
					     use_cpu_for_update, glob);
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
	uint64_t num_evictions;

	/* We only need to validate the page tables
	 * if they aren't already valid.
	 */
	num_evictions = atomic64_read(&adev->num_evictions);
	if (num_evictions == vm->last_eviction_counter)

	return amdgpu_vm_validate_level(&vm->root, validate, param,
					vm->use_cpu_for_update,
					adev->mman.bdev.glob);
 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
 *
 * @adev: amdgpu_device pointer
 * @saddr: start of the address range
 * @eaddr: end of the address range
 *
 * Make sure the page directories and page tables are allocated
 */
static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm,
				  struct amdgpu_vm_pt *parent,
				  uint64_t saddr, uint64_t eaddr,
	unsigned shift = (adev->vm_manager.num_level - level) *
		adev->vm_manager.block_size;
	unsigned pt_idx, from, to;
	uint64_t init_value = 0;
	if (!parent->entries) {
		unsigned num_entries = amdgpu_vm_num_entries(adev, level);

		parent->entries = kvmalloc_array(num_entries,
						 sizeof(struct amdgpu_vm_pt),
						 GFP_KERNEL | __GFP_ZERO);
		if (!parent->entries)

		memset(parent->entries, 0, sizeof(struct amdgpu_vm_pt));
	from = saddr >> shift;

	if (from >= amdgpu_vm_num_entries(adev, level) ||
	    to >= amdgpu_vm_num_entries(adev, level))

	if (to > parent->last_entry_used)
		parent->last_entry_used = to;

	saddr = saddr & ((1 << shift) - 1);
	eaddr = eaddr & ((1 << shift) - 1);
	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
			AMDGPU_GEM_CREATE_VRAM_CLEARED;
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				AMDGPU_GEM_CREATE_SHADOW);

	if (vm->pte_support_ats) {
		init_value = AMDGPU_PTE_SYSTEM;
		if (level != adev->vm_manager.num_level - 1)
			init_value |= AMDGPU_PDE_PTE;
	/* walk over the address space and allocate the page tables */
	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
		struct reservation_object *resv = vm->root.bo->tbo.resv;
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
		struct amdgpu_bo *pt;
		r = amdgpu_bo_create(adev,
				     amdgpu_vm_bo_size(adev, level),
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     NULL, resv, init_value, &pt);

		if (vm->use_cpu_for_update) {
			r = amdgpu_bo_kmap(pt, NULL);
				amdgpu_bo_unref(&pt);
		/* Keep a reference to the root directory to avoid
		 * freeing them up in the wrong order.
		 */
		pt->parent = amdgpu_bo_ref(vm->root.bo);
		if (level < adev->vm_manager.num_level) {
			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :

			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page tables are allocated.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size)
	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)

	eaddr = saddr + size - 1;
	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
			last_pfn, adev->vm_manager.max_pfn);

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
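	/*
	 * Illustrative call pattern only (the surrounding variable names are
	 * hypothetical): callers pass a GPU-page-aligned address and size,
	 * e.g.
	 *
	 *	r = amdgpu_vm_alloc_pts(adev, vm, 0x100000, 0x10000);
	 *
	 * which makes sure the directories and tables covering that 64 KiB
	 * window exist before the range is mapped.
	 */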
 * amdgpu_vm_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if GPU reset occurred since last use of the VMID.
 */
static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
				    struct amdgpu_vm_id *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
{
	return !!vm->reserved_vmid[vmhub];
397 /* idr_mgr->lock must be held */
398 static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm
*vm
,
399 struct amdgpu_ring
*ring
,
400 struct amdgpu_sync
*sync
,
401 struct dma_fence
*fence
,
402 struct amdgpu_job
*job
)
404 struct amdgpu_device
*adev
= ring
->adev
;
405 unsigned vmhub
= ring
->funcs
->vmhub
;
406 uint64_t fence_context
= adev
->fence_context
+ ring
->idx
;
407 struct amdgpu_vm_id
*id
= vm
->reserved_vmid
[vmhub
];
408 struct amdgpu_vm_id_manager
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
409 struct dma_fence
*updates
= sync
->last_vm_update
;
411 struct dma_fence
*flushed
, *tmp
;
412 bool needs_flush
= vm
->use_cpu_for_update
;
414 flushed
= id
->flushed_updates
;
415 if ((amdgpu_vm_had_gpu_reset(adev
, id
)) ||
416 (atomic64_read(&id
->owner
) != vm
->client_id
) ||
417 (job
->vm_pd_addr
!= id
->pd_gpu_addr
) ||
418 (updates
&& (!flushed
|| updates
->context
!= flushed
->context
||
419 dma_fence_is_later(updates
, flushed
))) ||
420 (!id
->last_flush
|| (id
->last_flush
->context
!= fence_context
&&
421 !dma_fence_is_signaled(id
->last_flush
)))) {
423 /* to prevent one context starved by another context */
425 tmp
= amdgpu_sync_peek_fence(&id
->active
, ring
);
427 r
= amdgpu_sync_fence(adev
, sync
, tmp
);
432 /* Good we can use this VMID. Remember this submission as
435 r
= amdgpu_sync_fence(ring
->adev
, &id
->active
, fence
);
439 if (updates
&& (!flushed
|| updates
->context
!= flushed
->context
||
440 dma_fence_is_later(updates
, flushed
))) {
441 dma_fence_put(id
->flushed_updates
);
442 id
->flushed_updates
= dma_fence_get(updates
);
444 id
->pd_gpu_addr
= job
->vm_pd_addr
;
445 atomic64_set(&id
->owner
, vm
->client_id
);
446 job
->vm_needs_flush
= needs_flush
;
448 dma_fence_put(id
->last_flush
);
449 id
->last_flush
= NULL
;
451 job
->vm_id
= id
- id_mgr
->ids
;
452 trace_amdgpu_vm_grab_id(vm
, ring
, job
);
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
467 int amdgpu_vm_grab_id(struct amdgpu_vm
*vm
, struct amdgpu_ring
*ring
,
468 struct amdgpu_sync
*sync
, struct dma_fence
*fence
,
469 struct amdgpu_job
*job
)
471 struct amdgpu_device
*adev
= ring
->adev
;
472 unsigned vmhub
= ring
->funcs
->vmhub
;
473 struct amdgpu_vm_id_manager
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
474 uint64_t fence_context
= adev
->fence_context
+ ring
->idx
;
475 struct dma_fence
*updates
= sync
->last_vm_update
;
476 struct amdgpu_vm_id
*id
, *idle
;
477 struct dma_fence
**fences
;
481 mutex_lock(&id_mgr
->lock
);
482 if (amdgpu_vm_reserved_vmid_ready(vm
, vmhub
)) {
483 r
= amdgpu_vm_grab_reserved_vmid_locked(vm
, ring
, sync
, fence
, job
);
484 mutex_unlock(&id_mgr
->lock
);
487 fences
= kmalloc_array(sizeof(void *), id_mgr
->num_ids
, GFP_KERNEL
);
489 mutex_unlock(&id_mgr
->lock
);
492 /* Check if we have an idle VMID */
494 list_for_each_entry(idle
, &id_mgr
->ids_lru
, list
) {
495 fences
[i
] = amdgpu_sync_peek_fence(&idle
->active
, ring
);
	/* If we can't find an idle VMID to use, wait till one becomes available */
502 if (&idle
->list
== &id_mgr
->ids_lru
) {
503 u64 fence_context
= adev
->vm_manager
.fence_context
+ ring
->idx
;
504 unsigned seqno
= ++adev
->vm_manager
.seqno
[ring
->idx
];
505 struct dma_fence_array
*array
;
508 for (j
= 0; j
< i
; ++j
)
509 dma_fence_get(fences
[j
]);
511 array
= dma_fence_array_create(i
, fences
, fence_context
,
514 for (j
= 0; j
< i
; ++j
)
515 dma_fence_put(fences
[j
]);
522 r
= amdgpu_sync_fence(ring
->adev
, sync
, &array
->base
);
523 dma_fence_put(&array
->base
);
527 mutex_unlock(&id_mgr
->lock
);
533 job
->vm_needs_flush
= vm
->use_cpu_for_update
;
534 /* Check if we can use a VMID already assigned to this VM */
535 list_for_each_entry_reverse(id
, &id_mgr
->ids_lru
, list
) {
536 struct dma_fence
*flushed
;
537 bool needs_flush
= vm
->use_cpu_for_update
;
539 /* Check all the prerequisites to using this VMID */
540 if (amdgpu_vm_had_gpu_reset(adev
, id
))
543 if (atomic64_read(&id
->owner
) != vm
->client_id
)
546 if (job
->vm_pd_addr
!= id
->pd_gpu_addr
)
549 if (!id
->last_flush
||
550 (id
->last_flush
->context
!= fence_context
&&
551 !dma_fence_is_signaled(id
->last_flush
)))
554 flushed
= id
->flushed_updates
;
555 if (updates
&& (!flushed
|| dma_fence_is_later(updates
, flushed
)))
558 /* Concurrent flushes are only possible starting with Vega10 */
559 if (adev
->asic_type
< CHIP_VEGA10
&& needs_flush
)
562 /* Good we can use this VMID. Remember this submission as
565 r
= amdgpu_sync_fence(ring
->adev
, &id
->active
, fence
);
569 if (updates
&& (!flushed
|| dma_fence_is_later(updates
, flushed
))) {
570 dma_fence_put(id
->flushed_updates
);
571 id
->flushed_updates
= dma_fence_get(updates
);
577 goto no_flush_needed
;
581 /* Still no ID to use? Then use the idle one found earlier */
584 /* Remember this submission as user of the VMID */
585 r
= amdgpu_sync_fence(ring
->adev
, &id
->active
, fence
);
589 id
->pd_gpu_addr
= job
->vm_pd_addr
;
590 dma_fence_put(id
->flushed_updates
);
591 id
->flushed_updates
= dma_fence_get(updates
);
592 atomic64_set(&id
->owner
, vm
->client_id
);
595 job
->vm_needs_flush
= true;
596 dma_fence_put(id
->last_flush
);
597 id
->last_flush
= NULL
;
600 list_move_tail(&id
->list
, &id_mgr
->ids_lru
);
602 job
->vm_id
= id
- id_mgr
->ids
;
603 trace_amdgpu_vm_grab_id(vm
, ring
, job
);
606 mutex_unlock(&id_mgr
->lock
);
610 static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device
*adev
,
611 struct amdgpu_vm
*vm
,
614 struct amdgpu_vm_id_manager
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
616 mutex_lock(&id_mgr
->lock
);
617 if (vm
->reserved_vmid
[vmhub
]) {
618 list_add(&vm
->reserved_vmid
[vmhub
]->list
,
620 vm
->reserved_vmid
[vmhub
] = NULL
;
621 atomic_dec(&id_mgr
->reserved_vmid_num
);
623 mutex_unlock(&id_mgr
->lock
);
626 static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device
*adev
,
627 struct amdgpu_vm
*vm
,
630 struct amdgpu_vm_id_manager
*id_mgr
;
631 struct amdgpu_vm_id
*idle
;
634 id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
635 mutex_lock(&id_mgr
->lock
);
636 if (vm
->reserved_vmid
[vmhub
])
638 if (atomic_inc_return(&id_mgr
->reserved_vmid_num
) >
639 AMDGPU_VM_MAX_RESERVED_VMID
) {
640 DRM_ERROR("Over limitation of reserved vmid\n");
641 atomic_dec(&id_mgr
->reserved_vmid_num
);
645 /* Select the first entry VMID */
646 idle
= list_first_entry(&id_mgr
->ids_lru
, struct amdgpu_vm_id
, list
);
647 list_del_init(&idle
->list
);
648 vm
->reserved_vmid
[vmhub
] = idle
;
649 mutex_unlock(&id_mgr
->lock
);
653 mutex_unlock(&id_mgr
->lock
);
658 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
660 * @adev: amdgpu_device pointer
662 void amdgpu_vm_check_compute_bug(struct amdgpu_device
*adev
)
664 const struct amdgpu_ip_block
*ip_block
;
665 bool has_compute_vm_bug
;
666 struct amdgpu_ring
*ring
;
669 has_compute_vm_bug
= false;
671 ip_block
= amdgpu_get_ip_block(adev
, AMD_IP_BLOCK_TYPE_GFX
);
	/* Compute has a VM bug for GFX version < 7.
	 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
	 */
675 if (ip_block
->version
->major
<= 7)
676 has_compute_vm_bug
= true;
677 else if (ip_block
->version
->major
== 8)
678 if (adev
->gfx
.mec_fw_version
< 673)
679 has_compute_vm_bug
= true;
682 for (i
= 0; i
< adev
->num_rings
; i
++) {
683 ring
= adev
->rings
[i
];
684 if (ring
->funcs
->type
== AMDGPU_RING_TYPE_COMPUTE
)
685 /* only compute rings */
686 ring
->has_compute_vm_bug
= has_compute_vm_bug
;
688 ring
->has_compute_vm_bug
= false;
692 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring
*ring
,
693 struct amdgpu_job
*job
)
695 struct amdgpu_device
*adev
= ring
->adev
;
696 unsigned vmhub
= ring
->funcs
->vmhub
;
697 struct amdgpu_vm_id_manager
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
698 struct amdgpu_vm_id
*id
;
699 bool gds_switch_needed
;
700 bool vm_flush_needed
= job
->vm_needs_flush
|| ring
->has_compute_vm_bug
;
704 id
= &id_mgr
->ids
[job
->vm_id
];
705 gds_switch_needed
= ring
->funcs
->emit_gds_switch
&& (
706 id
->gds_base
!= job
->gds_base
||
707 id
->gds_size
!= job
->gds_size
||
708 id
->gws_base
!= job
->gws_base
||
709 id
->gws_size
!= job
->gws_size
||
710 id
->oa_base
!= job
->oa_base
||
711 id
->oa_size
!= job
->oa_size
);
713 if (amdgpu_vm_had_gpu_reset(adev
, id
))
716 return vm_flush_needed
|| gds_switch_needed
;
719 static bool amdgpu_vm_is_large_bar(struct amdgpu_device
*adev
)
721 return (adev
->mc
.real_vram_size
== adev
->mc
.visible_vram_size
);
725 * amdgpu_vm_flush - hardware flush the vm
727 * @ring: ring to use for flush
728 * @vm_id: vmid number to use
729 * @pd_addr: address of the page directory
731 * Emit a VM flush when it is necessary.
733 int amdgpu_vm_flush(struct amdgpu_ring
*ring
, struct amdgpu_job
*job
, bool need_pipe_sync
)
735 struct amdgpu_device
*adev
= ring
->adev
;
736 unsigned vmhub
= ring
->funcs
->vmhub
;
737 struct amdgpu_vm_id_manager
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
738 struct amdgpu_vm_id
*id
= &id_mgr
->ids
[job
->vm_id
];
739 bool gds_switch_needed
= ring
->funcs
->emit_gds_switch
&& (
740 id
->gds_base
!= job
->gds_base
||
741 id
->gds_size
!= job
->gds_size
||
742 id
->gws_base
!= job
->gws_base
||
743 id
->gws_size
!= job
->gws_size
||
744 id
->oa_base
!= job
->oa_base
||
745 id
->oa_size
!= job
->oa_size
);
746 bool vm_flush_needed
= job
->vm_needs_flush
;
747 unsigned patch_offset
= 0;
750 if (amdgpu_vm_had_gpu_reset(adev
, id
)) {
751 gds_switch_needed
= true;
752 vm_flush_needed
= true;
755 if (!vm_flush_needed
&& !gds_switch_needed
&& !need_pipe_sync
)
758 if (ring
->funcs
->init_cond_exec
)
759 patch_offset
= amdgpu_ring_init_cond_exec(ring
);
762 amdgpu_ring_emit_pipeline_sync(ring
);
764 if (ring
->funcs
->emit_vm_flush
&& vm_flush_needed
) {
765 struct dma_fence
*fence
;
767 trace_amdgpu_vm_flush(ring
, job
->vm_id
, job
->vm_pd_addr
);
768 amdgpu_ring_emit_vm_flush(ring
, job
->vm_id
, job
->vm_pd_addr
);
770 r
= amdgpu_fence_emit(ring
, &fence
);
774 mutex_lock(&id_mgr
->lock
);
775 dma_fence_put(id
->last_flush
);
776 id
->last_flush
= fence
;
777 id
->current_gpu_reset_count
= atomic_read(&adev
->gpu_reset_counter
);
778 mutex_unlock(&id_mgr
->lock
);
781 if (ring
->funcs
->emit_gds_switch
&& gds_switch_needed
) {
782 id
->gds_base
= job
->gds_base
;
783 id
->gds_size
= job
->gds_size
;
784 id
->gws_base
= job
->gws_base
;
785 id
->gws_size
= job
->gws_size
;
786 id
->oa_base
= job
->oa_base
;
787 id
->oa_size
= job
->oa_size
;
788 amdgpu_ring_emit_gds_switch(ring
, job
->vm_id
, job
->gds_base
,
789 job
->gds_size
, job
->gws_base
,
790 job
->gws_size
, job
->oa_base
,
794 if (ring
->funcs
->patch_cond_exec
)
795 amdgpu_ring_patch_cond_exec(ring
, patch_offset
);
797 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
798 if (ring
->funcs
->emit_switch_buffer
) {
799 amdgpu_ring_emit_switch_buffer(ring
);
800 amdgpu_ring_emit_switch_buffer(ring
);
 * amdgpu_vm_reset_id - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vm_id: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
813 void amdgpu_vm_reset_id(struct amdgpu_device
*adev
, unsigned vmhub
,
816 struct amdgpu_vm_id_manager
*id_mgr
= &adev
->vm_manager
.id_mgr
[vmhub
];
817 struct amdgpu_vm_id
*id
= &id_mgr
->ids
[vmid
];
819 atomic64_set(&id
->owner
, 0);
 * amdgpu_vm_reset_all_ids - reset VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset the VMIDs to force a flush on next use.
 */
835 void amdgpu_vm_reset_all_ids(struct amdgpu_device
*adev
)
839 for (i
= 0; i
< AMDGPU_MAX_VMHUBS
; ++i
) {
840 struct amdgpu_vm_id_manager
*id_mgr
=
841 &adev
->vm_manager
.id_mgr
[i
];
843 for (j
= 1; j
< id_mgr
->num_ids
; ++j
)
844 amdgpu_vm_reset_id(adev
, i
, j
);
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
860 struct amdgpu_bo_va
*amdgpu_vm_bo_find(struct amdgpu_vm
*vm
,
861 struct amdgpu_bo
*bo
)
863 struct amdgpu_bo_va
*bo_va
;
865 list_for_each_entry(bo_va
, &bo
->va
, base
.bo_list
) {
866 if (bo_va
->base
.vm
== vm
) {
874 * amdgpu_vm_do_set_ptes - helper to call the right asic function
876 * @params: see amdgpu_pte_update_params definition
877 * @pe: addr of the page entry
878 * @addr: dst addr to write into pe
879 * @count: number of page entries to update
880 * @incr: increase next addr by incr bytes
881 * @flags: hw access flags
883 * Traces the parameters and calls the right asic functions
884 * to setup the page table using the DMA.
886 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params
*params
,
887 uint64_t pe
, uint64_t addr
,
888 unsigned count
, uint32_t incr
,
891 trace_amdgpu_vm_set_ptes(pe
, addr
, count
, incr
, flags
);
894 amdgpu_vm_write_pte(params
->adev
, params
->ib
, pe
,
895 addr
| flags
, count
, incr
);
898 amdgpu_vm_set_pte_pde(params
->adev
, params
->ib
, pe
, addr
,
904 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
906 * @params: see amdgpu_pte_update_params definition
907 * @pe: addr of the page entry
908 * @addr: dst addr to write into pe
909 * @count: number of page entries to update
910 * @incr: increase next addr by incr bytes
911 * @flags: hw access flags
913 * Traces the parameters and calls the DMA function to copy the PTEs.
915 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params
*params
,
916 uint64_t pe
, uint64_t addr
,
917 unsigned count
, uint32_t incr
,
920 uint64_t src
= (params
->src
+ (addr
>> 12) * 8);
923 trace_amdgpu_vm_copy_ptes(pe
, src
, count
);
925 amdgpu_vm_copy_pte(params
->adev
, params
->ib
, pe
, src
, count
);
929 * amdgpu_vm_map_gart - Resolve gart mapping of addr
931 * @pages_addr: optional DMA address to use for lookup
932 * @addr: the unmapped addr
934 * Look up the physical address of the page that the pte resolves
935 * to and return the pointer for the page table entry.
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size*/
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;
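	/*
	 * Worked example (illustrative values only): with 64 KiB CPU pages
	 * and addr = 0x12345, pages_addr[0x1] supplies the DMA address of the
	 * backing CPU page, the in-page offset 0x2345 is OR'ed in, and the
	 * final mask keeps only the 4 KiB-aligned part of that offset
	 * (0x2000), so the result points at the right GPU page within the
	 * larger CPU page.
	 */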
953 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
955 * @params: see amdgpu_pte_update_params definition
956 * @pe: kmap addr of the page entry
957 * @addr: dst addr to write into pe
958 * @count: number of page entries to update
959 * @incr: increase next addr by incr bytes
960 * @flags: hw access flags
962 * Write count number of PT/PD entries directly.
964 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params
*params
,
965 uint64_t pe
, uint64_t addr
,
966 unsigned count
, uint32_t incr
,
972 trace_amdgpu_vm_set_ptes(pe
, addr
, count
, incr
, flags
);
974 for (i
= 0; i
< count
; i
++) {
975 value
= params
->pages_addr
?
976 amdgpu_vm_map_gart(params
->pages_addr
, addr
) :
978 amdgpu_gart_set_pte_pde(params
->adev
, (void *)(uintptr_t)pe
,
984 static int amdgpu_vm_wait_pd(struct amdgpu_device
*adev
, struct amdgpu_vm
*vm
,
987 struct amdgpu_sync sync
;
990 amdgpu_sync_create(&sync
);
991 amdgpu_sync_resv(adev
, &sync
, vm
->root
.bo
->tbo
.resv
, owner
);
992 r
= amdgpu_sync_wait(&sync
, true);
993 amdgpu_sync_free(&sync
);
999 * amdgpu_vm_update_level - update a single level in the hierarchy
1001 * @adev: amdgpu_device pointer
1003 * @parent: parent directory
1005 * Makes sure all entries in @parent are up to date.
1006 * Returns 0 for success, error for failure.
1008 static int amdgpu_vm_update_level(struct amdgpu_device
*adev
,
1009 struct amdgpu_vm
*vm
,
1010 struct amdgpu_vm_pt
*parent
,
1013 struct amdgpu_bo
*shadow
;
1014 struct amdgpu_ring
*ring
= NULL
;
1015 uint64_t pd_addr
, shadow_addr
= 0;
1016 uint32_t incr
= amdgpu_vm_bo_size(adev
, level
+ 1);
1017 uint64_t last_pde
= ~0, last_pt
= ~0, last_shadow
= ~0;
1018 unsigned count
= 0, pt_idx
, ndw
= 0;
1019 struct amdgpu_job
*job
;
1020 struct amdgpu_pte_update_params params
;
1021 struct dma_fence
*fence
= NULL
;
1025 if (!parent
->entries
)
1028 memset(¶ms
, 0, sizeof(params
));
1030 shadow
= parent
->bo
->shadow
;
1032 if (vm
->use_cpu_for_update
) {
1033 pd_addr
= (unsigned long)amdgpu_bo_kptr(parent
->bo
);
1034 r
= amdgpu_vm_wait_pd(adev
, vm
, AMDGPU_FENCE_OWNER_VM
);
1038 params
.func
= amdgpu_vm_cpu_set_ptes
;
1040 ring
= container_of(vm
->entity
.sched
, struct amdgpu_ring
,
1046 /* assume the worst case */
1047 ndw
+= parent
->last_entry_used
* 6;
1049 pd_addr
= amdgpu_bo_gpu_offset(parent
->bo
);
1052 shadow_addr
= amdgpu_bo_gpu_offset(shadow
);
1058 r
= amdgpu_job_alloc_with_ib(adev
, ndw
* 4, &job
);
1062 params
.ib
= &job
->ibs
[0];
1063 params
.func
= amdgpu_vm_do_set_ptes
;
1067 /* walk over the address space and update the directory */
1068 for (pt_idx
= 0; pt_idx
<= parent
->last_entry_used
; ++pt_idx
) {
1069 struct amdgpu_bo
*bo
= parent
->entries
[pt_idx
].bo
;
1075 pt
= amdgpu_bo_gpu_offset(bo
);
1076 pt
= amdgpu_gart_get_vm_pde(adev
, pt
);
1077 /* Don't update huge pages here */
1078 if ((parent
->entries
[pt_idx
].addr
& AMDGPU_PDE_PTE
) ||
1079 parent
->entries
[pt_idx
].addr
== (pt
| AMDGPU_PTE_VALID
))
1082 parent
->entries
[pt_idx
].addr
= pt
| AMDGPU_PTE_VALID
;
1084 pde
= pd_addr
+ pt_idx
* 8;
1085 if (((last_pde
+ 8 * count
) != pde
) ||
1086 ((last_pt
+ incr
* count
) != pt
) ||
1087 (count
== AMDGPU_VM_MAX_UPDATE_SIZE
)) {
1091 params
.func(¶ms
,
1097 params
.func(¶ms
, last_pde
,
1098 last_pt
, count
, incr
,
1104 last_shadow
= shadow_addr
+ pt_idx
* 8;
1112 if (vm
->root
.bo
->shadow
)
1113 params
.func(¶ms
, last_shadow
, last_pt
,
1114 count
, incr
, AMDGPU_PTE_VALID
);
1116 params
.func(¶ms
, last_pde
, last_pt
,
1117 count
, incr
, AMDGPU_PTE_VALID
);
1120 if (!vm
->use_cpu_for_update
) {
1121 if (params
.ib
->length_dw
== 0) {
1122 amdgpu_job_free(job
);
1124 amdgpu_ring_pad_ib(ring
, params
.ib
);
1125 amdgpu_sync_resv(adev
, &job
->sync
, parent
->bo
->tbo
.resv
,
1126 AMDGPU_FENCE_OWNER_VM
);
1128 amdgpu_sync_resv(adev
, &job
->sync
,
1130 AMDGPU_FENCE_OWNER_VM
);
1132 WARN_ON(params
.ib
->length_dw
> ndw
);
1133 r
= amdgpu_job_submit(job
, ring
, &vm
->entity
,
1134 AMDGPU_FENCE_OWNER_VM
, &fence
);
1138 amdgpu_bo_fence(parent
->bo
, fence
, true);
1139 dma_fence_put(vm
->last_dir_update
);
1140 vm
->last_dir_update
= dma_fence_get(fence
);
1141 dma_fence_put(fence
);
1145 * Recurse into the subdirectories. This recursion is harmless because
1146 * we only have a maximum of 5 layers.
1148 for (pt_idx
= 0; pt_idx
<= parent
->last_entry_used
; ++pt_idx
) {
1149 struct amdgpu_vm_pt
*entry
= &parent
->entries
[pt_idx
];
1154 r
= amdgpu_vm_update_level(adev
, vm
, entry
, level
+ 1);
1162 amdgpu_job_free(job
);
1167 * amdgpu_vm_invalidate_level - mark all PD levels as invalid
1169 * @parent: parent PD
1171 * Mark all PD level as invalid after an error.
1173 static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt
*parent
)
1178 * Recurse into the subdirectories. This recursion is harmless because
1179 * we only have a maximum of 5 layers.
1181 for (pt_idx
= 0; pt_idx
<= parent
->last_entry_used
; ++pt_idx
) {
1182 struct amdgpu_vm_pt
*entry
= &parent
->entries
[pt_idx
];
1187 entry
->addr
= ~0ULL;
1188 amdgpu_vm_invalidate_level(entry
);
1193 * amdgpu_vm_update_directories - make sure that all directories are valid
1195 * @adev: amdgpu_device pointer
1198 * Makes sure all directories are up to date.
1199 * Returns 0 for success, error for failure.
1201 int amdgpu_vm_update_directories(struct amdgpu_device
*adev
,
1202 struct amdgpu_vm
*vm
)
1206 r
= amdgpu_vm_update_level(adev
, vm
, &vm
->root
, 0);
1208 amdgpu_vm_invalidate_level(&vm
->root
);
1210 if (vm
->use_cpu_for_update
) {
1213 amdgpu_gart_flush_gpu_tlb(adev
, 0);
 * amdgpu_vm_get_entry - find the entry for an address
 *
 * @p: see amdgpu_pte_update_params definition
 * @addr: virtual address in question
 * @entry: resulting entry or NULL
 * @parent: parent entry
 *
 * Find the vm_pt entry and its parent for the given address.
 */
1229 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params
*p
, uint64_t addr
,
1230 struct amdgpu_vm_pt
**entry
,
1231 struct amdgpu_vm_pt
**parent
)
1233 unsigned idx
, level
= p
->adev
->vm_manager
.num_level
;
1236 *entry
= &p
->vm
->root
;
1237 while ((*entry
)->entries
) {
1238 idx
= addr
>> (p
->adev
->vm_manager
.block_size
* level
--);
1239 idx
%= amdgpu_bo_size((*entry
)->bo
) / 8;
1241 *entry
= &(*entry
)->entries
[idx
];
 * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
 *
 * @p: see amdgpu_pte_update_params definition
 * @entry: vm_pt entry to check
 * @parent: parent entry
 * @nptes: number of PTEs updated with this operation
 * @dst: destination address where the PTEs should point to
 * @flags: access flags for the PTEs
 *
 * Check if we can update the PD with a huge page.
 */
1260 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params
*p
,
1261 struct amdgpu_vm_pt
*entry
,
1262 struct amdgpu_vm_pt
*parent
,
1263 unsigned nptes
, uint64_t dst
,
1266 bool use_cpu_update
= (p
->func
== amdgpu_vm_cpu_set_ptes
);
1267 uint64_t pd_addr
, pde
;
1269 /* In the case of a mixed PT the PDE must point to it*/
1270 if (p
->adev
->asic_type
< CHIP_VEGA10
||
1271 nptes
!= AMDGPU_VM_PTE_COUNT(p
->adev
) ||
1273 !(flags
& AMDGPU_PTE_VALID
)) {
1275 dst
= amdgpu_bo_gpu_offset(entry
->bo
);
1276 dst
= amdgpu_gart_get_vm_pde(p
->adev
, dst
);
1277 flags
= AMDGPU_PTE_VALID
;
1279 /* Set the huge page flag to stop scanning at this PDE */
1280 flags
|= AMDGPU_PDE_PTE
;
1283 if (entry
->addr
== (dst
| flags
))
1286 entry
->addr
= (dst
| flags
);
1288 if (use_cpu_update
) {
1289 /* In case a huge page is replaced with a system
1290 * memory mapping, p->pages_addr != NULL and
1291 * amdgpu_vm_cpu_set_ptes would try to translate dst
1292 * through amdgpu_vm_map_gart. But dst is already a
1293 * GPU address (of the page table). Disable
1294 * amdgpu_vm_map_gart temporarily.
1298 tmp
= p
->pages_addr
;
1299 p
->pages_addr
= NULL
;
1301 pd_addr
= (unsigned long)amdgpu_bo_kptr(parent
->bo
);
1302 pde
= pd_addr
+ (entry
- parent
->entries
) * 8;
1303 amdgpu_vm_cpu_set_ptes(p
, pde
, dst
, 1, 0, flags
);
1305 p
->pages_addr
= tmp
;
1307 if (parent
->bo
->shadow
) {
1308 pd_addr
= amdgpu_bo_gpu_offset(parent
->bo
->shadow
);
1309 pde
= pd_addr
+ (entry
- parent
->entries
) * 8;
1310 amdgpu_vm_do_set_ptes(p
, pde
, dst
, 1, 0, flags
);
1312 pd_addr
= amdgpu_bo_gpu_offset(parent
->bo
);
1313 pde
= pd_addr
+ (entry
- parent
->entries
) * 8;
1314 amdgpu_vm_do_set_ptes(p
, pde
, dst
, 1, 0, flags
);
1319 * amdgpu_vm_update_ptes - make sure that page tables are valid
1321 * @params: see amdgpu_pte_update_params definition
1323 * @start: start of GPU address range
1324 * @end: end of GPU address range
1325 * @dst: destination address to map to, the next dst inside the function
1326 * @flags: mapping flags
1328 * Update the page tables in the range @start - @end.
1329 * Returns 0 for success, -EINVAL for failure.
1331 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params
*params
,
1332 uint64_t start
, uint64_t end
,
1333 uint64_t dst
, uint64_t flags
)
1335 struct amdgpu_device
*adev
= params
->adev
;
1336 const uint64_t mask
= AMDGPU_VM_PTE_COUNT(adev
) - 1;
1338 uint64_t addr
, pe_start
;
1339 struct amdgpu_bo
*pt
;
1341 bool use_cpu_update
= (params
->func
== amdgpu_vm_cpu_set_ptes
);
1343 /* walk over the address space and update the page tables */
1344 for (addr
= start
; addr
< end
; addr
+= nptes
,
1345 dst
+= nptes
* AMDGPU_GPU_PAGE_SIZE
) {
1346 struct amdgpu_vm_pt
*entry
, *parent
;
1348 amdgpu_vm_get_entry(params
, addr
, &entry
, &parent
);
1352 if ((addr
& ~mask
) == (end
& ~mask
))
1355 nptes
= AMDGPU_VM_PTE_COUNT(adev
) - (addr
& mask
);
1357 amdgpu_vm_handle_huge_pages(params
, entry
, parent
,
1359 /* We don't need to update PTEs for huge pages */
1360 if (entry
->addr
& AMDGPU_PDE_PTE
)
1364 if (use_cpu_update
) {
1365 pe_start
= (unsigned long)amdgpu_bo_kptr(pt
);
1368 pe_start
= amdgpu_bo_gpu_offset(pt
->shadow
);
1369 pe_start
+= (addr
& mask
) * 8;
1370 params
->func(params
, pe_start
, dst
, nptes
,
1371 AMDGPU_GPU_PAGE_SIZE
, flags
);
1373 pe_start
= amdgpu_bo_gpu_offset(pt
);
1376 pe_start
+= (addr
& mask
) * 8;
1377 params
->func(params
, pe_start
, dst
, nptes
,
1378 AMDGPU_GPU_PAGE_SIZE
, flags
);
1385 * amdgpu_vm_frag_ptes - add fragment information to PTEs
1387 * @params: see amdgpu_pte_update_params definition
1389 * @start: first PTE to handle
1390 * @end: last PTE to handle
1391 * @dst: addr those PTEs should point to
1392 * @flags: hw mapping flags
1393 * Returns 0 for success, -EINVAL for failure.
1395 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params
*params
,
1396 uint64_t start
, uint64_t end
,
1397 uint64_t dst
, uint64_t flags
)
1402 * The MC L1 TLB supports variable sized pages, based on a fragment
1403 * field in the PTE. When this field is set to a non-zero value, page
1404 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1405 * flags are considered valid for all PTEs within the fragment range
1406 * and corresponding mappings are assumed to be physically contiguous.
1408 * The L1 TLB can store a single PTE for the whole fragment,
1409 * significantly increasing the space available for translation
1410 * caching. This leads to large improvements in throughput when the
1411 * TLB is under pressure.
1413 * The L2 TLB distributes small and large fragments into two
1414 * asymmetric partitions. The large fragment cache is significantly
1415 * larger. Thus, we try to use large fragments wherever possible.
1416 * Userspace can support this by aligning virtual base address and
1417 * allocation size to the fragment size.
1419 unsigned pages_per_frag
= params
->adev
->vm_manager
.fragment_size
;
1420 uint64_t frag_flags
= AMDGPU_PTE_FRAG(pages_per_frag
);
1421 uint64_t frag_align
= 1 << pages_per_frag
;
1423 uint64_t frag_start
= ALIGN(start
, frag_align
);
1424 uint64_t frag_end
= end
& ~(frag_align
- 1);
	/* system pages are not physically contiguous */
1427 if (params
->src
|| !(flags
& AMDGPU_PTE_VALID
) ||
1428 (frag_start
>= frag_end
))
1429 return amdgpu_vm_update_ptes(params
, start
, end
, dst
, flags
);
1431 /* handle the 4K area at the beginning */
1432 if (start
!= frag_start
) {
1433 r
= amdgpu_vm_update_ptes(params
, start
, frag_start
,
1437 dst
+= (frag_start
- start
) * AMDGPU_GPU_PAGE_SIZE
;
1440 /* handle the area in the middle */
1441 r
= amdgpu_vm_update_ptes(params
, frag_start
, frag_end
, dst
,
1442 flags
| frag_flags
);
1446 /* handle the 4K area at the end */
1447 if (frag_end
!= end
) {
1448 dst
+= (frag_end
- frag_start
) * AMDGPU_GPU_PAGE_SIZE
;
1449 r
= amdgpu_vm_update_ptes(params
, frag_end
, end
, dst
, flags
);
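	/*
	 * Worked example (illustrative only): with fragment_size = 4 a
	 * fragment covers 1 << (12 + 4) = 64 KiB, i.e. frag_align = 16 pages.
	 * A request for pages 5..70 is therefore split into 5..15 (leading
	 * 4 KiB updates), 16..63 (one aligned run carrying the fragment
	 * flags) and 64..70 (trailing 4 KiB updates).
	 */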
1455 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1457 * @adev: amdgpu_device pointer
1458 * @exclusive: fence we need to sync to
1459 * @src: address where to copy page table entries from
1460 * @pages_addr: DMA addresses to use for mapping
1462 * @start: start of mapped range
1463 * @last: last mapped entry
1464 * @flags: flags for the entries
1465 * @addr: addr to set the area to
1466 * @fence: optional resulting fence
1468 * Fill in the page table entries between @start and @last.
1469 * Returns 0 for success, -EINVAL for failure.
1471 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device
*adev
,
1472 struct dma_fence
*exclusive
,
1474 dma_addr_t
*pages_addr
,
1475 struct amdgpu_vm
*vm
,
1476 uint64_t start
, uint64_t last
,
1477 uint64_t flags
, uint64_t addr
,
1478 struct dma_fence
**fence
)
1480 struct amdgpu_ring
*ring
;
1481 void *owner
= AMDGPU_FENCE_OWNER_VM
;
1482 unsigned nptes
, ncmds
, ndw
;
1483 struct amdgpu_job
*job
;
1484 struct amdgpu_pte_update_params params
;
1485 struct dma_fence
*f
= NULL
;
1488 memset(¶ms
, 0, sizeof(params
));
1493 /* sync to everything on unmapping */
1494 if (!(flags
& AMDGPU_PTE_VALID
))
1495 owner
= AMDGPU_FENCE_OWNER_UNDEFINED
;
1497 if (vm
->use_cpu_for_update
) {
		/* params.src is used as a flag to indicate system memory */
1502 /* Wait for PT BOs to be free. PTs share the same resv. object
1505 r
= amdgpu_vm_wait_pd(adev
, vm
, owner
);
1509 params
.func
= amdgpu_vm_cpu_set_ptes
;
1510 params
.pages_addr
= pages_addr
;
1511 return amdgpu_vm_frag_ptes(¶ms
, start
, last
+ 1,
1515 ring
= container_of(vm
->entity
.sched
, struct amdgpu_ring
, sched
);
1517 nptes
= last
- start
+ 1;
1520 * reserve space for one command every (1 << BLOCK_SIZE)
1521 * entries or 2k dwords (whatever is smaller)
1523 ncmds
= (nptes
>> min(adev
->vm_manager
.block_size
, 11u)) + 1;
1528 /* one PDE write for each huge page */
1529 ndw
+= ((nptes
>> adev
->vm_manager
.block_size
) + 1) * 6;
1532 /* only copy commands needed */
1535 params
.func
= amdgpu_vm_do_copy_ptes
;
1537 } else if (pages_addr
) {
1538 /* copy commands needed */
1544 params
.func
= amdgpu_vm_do_copy_ptes
;
1547 /* set page commands needed */
1550 /* two extra commands for begin/end of fragment */
1553 params
.func
= amdgpu_vm_do_set_ptes
;
1556 r
= amdgpu_job_alloc_with_ib(adev
, ndw
* 4, &job
);
1560 params
.ib
= &job
->ibs
[0];
1562 if (!src
&& pages_addr
) {
1566 /* Put the PTEs at the end of the IB. */
1567 i
= ndw
- nptes
* 2;
1568 pte
= (uint64_t *)&(job
->ibs
->ptr
[i
]);
1569 params
.src
= job
->ibs
->gpu_addr
+ i
* 4;
1571 for (i
= 0; i
< nptes
; ++i
) {
1572 pte
[i
] = amdgpu_vm_map_gart(pages_addr
, addr
+ i
*
1573 AMDGPU_GPU_PAGE_SIZE
);
1579 r
= amdgpu_sync_fence(adev
, &job
->sync
, exclusive
);
1583 r
= amdgpu_sync_resv(adev
, &job
->sync
, vm
->root
.bo
->tbo
.resv
,
1588 r
= reservation_object_reserve_shared(vm
->root
.bo
->tbo
.resv
);
1592 r
= amdgpu_vm_frag_ptes(¶ms
, start
, last
+ 1, addr
, flags
);
1596 amdgpu_ring_pad_ib(ring
, params
.ib
);
1597 WARN_ON(params
.ib
->length_dw
> ndw
);
1598 r
= amdgpu_job_submit(job
, ring
, &vm
->entity
,
1599 AMDGPU_FENCE_OWNER_VM
, &f
);
1603 amdgpu_bo_fence(vm
->root
.bo
, f
, true);
1604 dma_fence_put(*fence
);
1609 amdgpu_job_free(job
);
1610 amdgpu_vm_invalidate_level(&vm
->root
);
1615 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1617 * @adev: amdgpu_device pointer
1618 * @exclusive: fence we need to sync to
1619 * @pages_addr: DMA addresses to use for mapping
1621 * @mapping: mapped range and flags to use for the update
1622 * @flags: HW flags for the mapping
1623 * @nodes: array of drm_mm_nodes with the MC addresses
1624 * @fence: optional resulting fence
1626 * Split the mapping into smaller chunks so that each update fits
1628 * Returns 0 for success, -EINVAL for failure.
1630 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device
*adev
,
1631 struct dma_fence
*exclusive
,
1632 dma_addr_t
*pages_addr
,
1633 struct amdgpu_vm
*vm
,
1634 struct amdgpu_bo_va_mapping
*mapping
,
1636 struct drm_mm_node
*nodes
,
1637 struct dma_fence
**fence
)
	uint64_t pfn, src = 0, start = mapping->start;

	/* Normally bo_va->flags only contains the READABLE and WRITEABLE bits,
	 * but filter the flags here anyway, just in case.
	 */
1645 if (!(mapping
->flags
& AMDGPU_PTE_READABLE
))
1646 flags
&= ~AMDGPU_PTE_READABLE
;
1647 if (!(mapping
->flags
& AMDGPU_PTE_WRITEABLE
))
1648 flags
&= ~AMDGPU_PTE_WRITEABLE
;
1650 flags
&= ~AMDGPU_PTE_EXECUTABLE
;
1651 flags
|= mapping
->flags
& AMDGPU_PTE_EXECUTABLE
;
1653 flags
&= ~AMDGPU_PTE_MTYPE_MASK
;
1654 flags
|= (mapping
->flags
& AMDGPU_PTE_MTYPE_MASK
);
1656 if ((mapping
->flags
& AMDGPU_PTE_PRT
) &&
1657 (adev
->asic_type
>= CHIP_VEGA10
)) {
1658 flags
|= AMDGPU_PTE_PRT
;
1659 flags
&= ~AMDGPU_PTE_VALID
;
1662 trace_amdgpu_vm_bo_update(mapping
);
1664 pfn
= mapping
->offset
>> PAGE_SHIFT
;
1666 while (pfn
>= nodes
->size
) {
1673 uint64_t max_entries
;
1674 uint64_t addr
, last
;
1677 addr
= nodes
->start
<< PAGE_SHIFT
;
1678 max_entries
= (nodes
->size
- pfn
) *
1679 (PAGE_SIZE
/ AMDGPU_GPU_PAGE_SIZE
);
1682 max_entries
= S64_MAX
;
1686 max_entries
= min(max_entries
, 16ull * 1024ull);
1688 } else if (flags
& AMDGPU_PTE_VALID
) {
1689 addr
+= adev
->vm_manager
.vram_base_offset
;
1691 addr
+= pfn
<< PAGE_SHIFT
;
1693 last
= min((uint64_t)mapping
->last
, start
+ max_entries
- 1);
1694 r
= amdgpu_vm_bo_update_mapping(adev
, exclusive
,
1695 src
, pages_addr
, vm
,
1696 start
, last
, flags
, addr
,
1701 pfn
+= last
- start
+ 1;
1702 if (nodes
&& nodes
->size
== pfn
) {
1708 } while (unlikely(start
!= mapping
->last
+ 1));
1714 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1716 * @adev: amdgpu_device pointer
1717 * @bo_va: requested BO and VM object
1718 * @clear: if true clear the entries
1720 * Fill in the page table entries for @bo_va.
1721 * Returns 0 for success, -EINVAL for failure.
1723 int amdgpu_vm_bo_update(struct amdgpu_device
*adev
,
1724 struct amdgpu_bo_va
*bo_va
,
1727 struct amdgpu_bo
*bo
= bo_va
->base
.bo
;
1728 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
1729 struct amdgpu_bo_va_mapping
*mapping
;
1730 dma_addr_t
*pages_addr
= NULL
;
1731 struct ttm_mem_reg
*mem
;
1732 struct drm_mm_node
*nodes
;
1733 struct dma_fence
*exclusive
;
1737 if (clear
|| !bo_va
->base
.bo
) {
1742 struct ttm_dma_tt
*ttm
;
1744 mem
= &bo_va
->base
.bo
->tbo
.mem
;
1745 nodes
= mem
->mm_node
;
1746 if (mem
->mem_type
== TTM_PL_TT
) {
1747 ttm
= container_of(bo_va
->base
.bo
->tbo
.ttm
,
1748 struct ttm_dma_tt
, ttm
);
1749 pages_addr
= ttm
->dma_address
;
1751 exclusive
= reservation_object_get_excl(bo
->tbo
.resv
);
1755 flags
= amdgpu_ttm_tt_pte_flags(adev
, bo
->tbo
.ttm
, mem
);
1759 spin_lock(&vm
->status_lock
);
1760 if (!list_empty(&bo_va
->base
.vm_status
))
1761 list_splice_init(&bo_va
->valids
, &bo_va
->invalids
);
1762 spin_unlock(&vm
->status_lock
);
1764 list_for_each_entry(mapping
, &bo_va
->invalids
, list
) {
1765 r
= amdgpu_vm_bo_split_mapping(adev
, exclusive
, pages_addr
, vm
,
1766 mapping
, flags
, nodes
,
1767 &bo_va
->last_pt_update
);
1772 if (trace_amdgpu_vm_bo_mapping_enabled()) {
1773 list_for_each_entry(mapping
, &bo_va
->valids
, list
)
1774 trace_amdgpu_vm_bo_mapping(mapping
);
1776 list_for_each_entry(mapping
, &bo_va
->invalids
, list
)
1777 trace_amdgpu_vm_bo_mapping(mapping
);
1780 spin_lock(&vm
->status_lock
);
1781 list_splice_init(&bo_va
->invalids
, &bo_va
->valids
);
1782 list_del_init(&bo_va
->base
.vm_status
);
1784 list_add(&bo_va
->base
.vm_status
, &vm
->cleared
);
1785 spin_unlock(&vm
->status_lock
);
1787 if (vm
->use_cpu_for_update
) {
1790 amdgpu_gart_flush_gpu_tlb(adev
, 0);
1797 * amdgpu_vm_update_prt_state - update the global PRT state
1799 static void amdgpu_vm_update_prt_state(struct amdgpu_device
*adev
)
1801 unsigned long flags
;
1804 spin_lock_irqsave(&adev
->vm_manager
.prt_lock
, flags
);
1805 enable
= !!atomic_read(&adev
->vm_manager
.num_prt_users
);
1806 adev
->gart
.gart_funcs
->set_prt(adev
, enable
);
1807 spin_unlock_irqrestore(&adev
->vm_manager
.prt_lock
, flags
);
1811 * amdgpu_vm_prt_get - add a PRT user
1813 static void amdgpu_vm_prt_get(struct amdgpu_device
*adev
)
1815 if (!adev
->gart
.gart_funcs
->set_prt
)
1818 if (atomic_inc_return(&adev
->vm_manager
.num_prt_users
) == 1)
1819 amdgpu_vm_update_prt_state(adev
);
1823 * amdgpu_vm_prt_put - drop a PRT user
1825 static void amdgpu_vm_prt_put(struct amdgpu_device
*adev
)
1827 if (atomic_dec_return(&adev
->vm_manager
.num_prt_users
) == 0)
1828 amdgpu_vm_update_prt_state(adev
);
1832 * amdgpu_vm_prt_cb - callback for updating the PRT status
1834 static void amdgpu_vm_prt_cb(struct dma_fence
*fence
, struct dma_fence_cb
*_cb
)
1836 struct amdgpu_prt_cb
*cb
= container_of(_cb
, struct amdgpu_prt_cb
, cb
);
1838 amdgpu_vm_prt_put(cb
->adev
);
1843 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1845 static void amdgpu_vm_add_prt_cb(struct amdgpu_device
*adev
,
1846 struct dma_fence
*fence
)
1848 struct amdgpu_prt_cb
*cb
;
1850 if (!adev
->gart
.gart_funcs
->set_prt
)
1853 cb
= kmalloc(sizeof(struct amdgpu_prt_cb
), GFP_KERNEL
);
1855 /* Last resort when we are OOM */
1857 dma_fence_wait(fence
, false);
1859 amdgpu_vm_prt_put(adev
);
1862 if (!fence
|| dma_fence_add_callback(fence
, &cb
->cb
,
1864 amdgpu_vm_prt_cb(fence
, &cb
->cb
);
1869 * amdgpu_vm_free_mapping - free a mapping
1871 * @adev: amdgpu_device pointer
1873 * @mapping: mapping to be freed
1874 * @fence: fence of the unmap operation
1876 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1878 static void amdgpu_vm_free_mapping(struct amdgpu_device
*adev
,
1879 struct amdgpu_vm
*vm
,
1880 struct amdgpu_bo_va_mapping
*mapping
,
1881 struct dma_fence
*fence
)
1883 if (mapping
->flags
& AMDGPU_PTE_PRT
)
1884 amdgpu_vm_add_prt_cb(adev
, fence
);
1889 * amdgpu_vm_prt_fini - finish all prt mappings
1891 * @adev: amdgpu_device pointer
1894 * Register a cleanup callback to disable PRT support after VM dies.
1896 static void amdgpu_vm_prt_fini(struct amdgpu_device
*adev
, struct amdgpu_vm
*vm
)
1898 struct reservation_object
*resv
= vm
->root
.bo
->tbo
.resv
;
1899 struct dma_fence
*excl
, **shared
;
1900 unsigned i
, shared_count
;
1903 r
= reservation_object_get_fences_rcu(resv
, &excl
,
1904 &shared_count
, &shared
);
1906 /* Not enough memory to grab the fence list, as last resort
1907 * block for all the fences to complete.
1909 reservation_object_wait_timeout_rcu(resv
, true, false,
1910 MAX_SCHEDULE_TIMEOUT
);
1914 /* Add a callback for each fence in the reservation object */
1915 amdgpu_vm_prt_get(adev
);
1916 amdgpu_vm_add_prt_cb(adev
, excl
);
1918 for (i
= 0; i
< shared_count
; ++i
) {
1919 amdgpu_vm_prt_get(adev
);
1920 amdgpu_vm_add_prt_cb(adev
, shared
[i
]);
1927 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1929 * @adev: amdgpu_device pointer
1931 * @fence: optional resulting fence (unchanged if no work needed to be done
1932 * or if an error occurred)
1934 * Make sure all freed BOs are cleared in the PT.
1935 * Returns 0 for success.
1937 * PTs have to be reserved and mutex must be locked!
1939 int amdgpu_vm_clear_freed(struct amdgpu_device
*adev
,
1940 struct amdgpu_vm
*vm
,
1941 struct dma_fence
**fence
)
1943 struct amdgpu_bo_va_mapping
*mapping
;
1944 struct dma_fence
*f
= NULL
;
1946 uint64_t init_pte_value
= 0;
1948 while (!list_empty(&vm
->freed
)) {
1949 mapping
= list_first_entry(&vm
->freed
,
1950 struct amdgpu_bo_va_mapping
, list
);
1951 list_del(&mapping
->list
);
1953 if (vm
->pte_support_ats
)
1954 init_pte_value
= AMDGPU_PTE_SYSTEM
;
1956 r
= amdgpu_vm_bo_update_mapping(adev
, NULL
, 0, NULL
, vm
,
1957 mapping
->start
, mapping
->last
,
1958 init_pte_value
, 0, &f
);
1959 amdgpu_vm_free_mapping(adev
, vm
, mapping
, f
);
1967 dma_fence_put(*fence
);
1978 * amdgpu_vm_clear_moved - clear moved BOs in the PT
1980 * @adev: amdgpu_device pointer
1983 * Make sure all moved BOs are cleared in the PT.
1984 * Returns 0 for success.
1986 * PTs have to be reserved and mutex must be locked!
1988 int amdgpu_vm_clear_moved(struct amdgpu_device
*adev
, struct amdgpu_vm
*vm
,
1989 struct amdgpu_sync
*sync
)
1991 struct amdgpu_bo_va
*bo_va
= NULL
;
1994 spin_lock(&vm
->status_lock
);
1995 while (!list_empty(&vm
->moved
)) {
1996 bo_va
= list_first_entry(&vm
->moved
,
1997 struct amdgpu_bo_va
, base
.vm_status
);
1998 spin_unlock(&vm
->status_lock
);
2000 r
= amdgpu_vm_bo_update(adev
, bo_va
, true);
2004 spin_lock(&vm
->status_lock
);
2006 spin_unlock(&vm
->status_lock
);
2009 r
= amdgpu_sync_fence(adev
, sync
, bo_va
->last_pt_update
);
2015 * amdgpu_vm_bo_add - add a bo to a specific vm
2017 * @adev: amdgpu_device pointer
2019 * @bo: amdgpu buffer object
2021 * Add @bo into the requested vm.
2022 * Add @bo to the list of bos associated with the vm
2023 * Returns newly added bo_va or NULL for failure
2025 * Object has to be reserved!
2027 struct amdgpu_bo_va
*amdgpu_vm_bo_add(struct amdgpu_device
*adev
,
2028 struct amdgpu_vm
*vm
,
2029 struct amdgpu_bo
*bo
)
2031 struct amdgpu_bo_va
*bo_va
;
2033 bo_va
= kzalloc(sizeof(struct amdgpu_bo_va
), GFP_KERNEL
);
2034 if (bo_va
== NULL
) {
2037 bo_va
->base
.vm
= vm
;
2038 bo_va
->base
.bo
= bo
;
2039 INIT_LIST_HEAD(&bo_va
->base
.bo_list
);
2040 INIT_LIST_HEAD(&bo_va
->base
.vm_status
);
2042 bo_va
->ref_count
= 1;
2043 INIT_LIST_HEAD(&bo_va
->valids
);
2044 INIT_LIST_HEAD(&bo_va
->invalids
);
2047 list_add_tail(&bo_va
->base
.bo_list
, &bo
->va
);
2053 * amdgpu_vm_bo_map - map bo inside a vm
2055 * @adev: amdgpu_device pointer
2056 * @bo_va: bo_va to store the address
2057 * @saddr: where to map the BO
2058 * @offset: requested offset in the BO
2059 * @flags: attributes of pages (read/write/valid/etc.)
 * Add a mapping of the BO at the specified addr into the VM.
2062 * Returns 0 for success, error for failure.
2064 * Object has to be reserved and unreserved outside!
2066 int amdgpu_vm_bo_map(struct amdgpu_device
*adev
,
2067 struct amdgpu_bo_va
*bo_va
,
2068 uint64_t saddr
, uint64_t offset
,
2069 uint64_t size
, uint64_t flags
)
2071 struct amdgpu_bo_va_mapping
*mapping
, *tmp
;
2072 struct amdgpu_bo
*bo
= bo_va
->base
.bo
;
2073 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
2076 /* validate the parameters */
2077 if (saddr
& AMDGPU_GPU_PAGE_MASK
|| offset
& AMDGPU_GPU_PAGE_MASK
||
2078 size
== 0 || size
& AMDGPU_GPU_PAGE_MASK
)
2081 /* make sure object fit at this offset */
2082 eaddr
= saddr
+ size
- 1;
2083 if (saddr
>= eaddr
||
2084 (bo
&& offset
+ size
> amdgpu_bo_size(bo
)))
2087 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
2088 eaddr
/= AMDGPU_GPU_PAGE_SIZE
;
2090 tmp
= amdgpu_vm_it_iter_first(&vm
->va
, saddr
, eaddr
);
2092 /* bo and tmp overlap, invalid addr */
2093 dev_err(adev
->dev
, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2094 "0x%010Lx-0x%010Lx\n", bo
, saddr
, eaddr
,
2095 tmp
->start
, tmp
->last
+ 1);
2099 mapping
= kmalloc(sizeof(*mapping
), GFP_KERNEL
);
2103 INIT_LIST_HEAD(&mapping
->list
);
2104 mapping
->start
= saddr
;
2105 mapping
->last
= eaddr
;
2106 mapping
->offset
= offset
;
2107 mapping
->flags
= flags
;
2109 list_add(&mapping
->list
, &bo_va
->invalids
);
2110 amdgpu_vm_it_insert(mapping
, &vm
->va
);
2112 if (flags
& AMDGPU_PTE_PRT
)
2113 amdgpu_vm_prt_get(adev
);
2119 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2121 * @adev: amdgpu_device pointer
2122 * @bo_va: bo_va to store the address
2123 * @saddr: where to map the BO
2124 * @offset: requested offset in the BO
2125 * @flags: attributes of pages (read/write/valid/etc.)
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2128 * mappings as we do so.
2129 * Returns 0 for success, error for failure.
2131 * Object has to be reserved and unreserved outside!
2133 int amdgpu_vm_bo_replace_map(struct amdgpu_device
*adev
,
2134 struct amdgpu_bo_va
*bo_va
,
2135 uint64_t saddr
, uint64_t offset
,
2136 uint64_t size
, uint64_t flags
)
2138 struct amdgpu_bo_va_mapping
*mapping
;
2139 struct amdgpu_bo
*bo
= bo_va
->base
.bo
;
2140 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
2144 /* validate the parameters */
2145 if (saddr
& AMDGPU_GPU_PAGE_MASK
|| offset
& AMDGPU_GPU_PAGE_MASK
||
2146 size
== 0 || size
& AMDGPU_GPU_PAGE_MASK
)
2149 /* make sure object fit at this offset */
2150 eaddr
= saddr
+ size
- 1;
2151 if (saddr
>= eaddr
||
2152 (bo
&& offset
+ size
> amdgpu_bo_size(bo
)))
2155 /* Allocate all the needed memory */
2156 mapping
= kmalloc(sizeof(*mapping
), GFP_KERNEL
);
2160 r
= amdgpu_vm_bo_clear_mappings(adev
, bo_va
->base
.vm
, saddr
, size
);
2166 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
2167 eaddr
/= AMDGPU_GPU_PAGE_SIZE
;
2169 mapping
->start
= saddr
;
2170 mapping
->last
= eaddr
;
2171 mapping
->offset
= offset
;
2172 mapping
->flags
= flags
;
2174 list_add(&mapping
->list
, &bo_va
->invalids
);
2175 amdgpu_vm_it_insert(mapping
, &vm
->va
);
2177 if (flags
& AMDGPU_PTE_PRT
)
2178 amdgpu_vm_prt_get(adev
);
2184 * amdgpu_vm_bo_unmap - remove bo mapping from vm
2186 * @adev: amdgpu_device pointer
2187 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
2191 * Returns 0 for success, error for failure.
2193 * Object has to be reserved and unreserved outside!
2195 int amdgpu_vm_bo_unmap(struct amdgpu_device
*adev
,
2196 struct amdgpu_bo_va
*bo_va
,
2199 struct amdgpu_bo_va_mapping
*mapping
;
2200 struct amdgpu_vm
*vm
= bo_va
->base
.vm
;
2203 saddr
/= AMDGPU_GPU_PAGE_SIZE
;
2205 list_for_each_entry(mapping
, &bo_va
->valids
, list
) {
2206 if (mapping
->start
== saddr
)
2210 if (&mapping
->list
== &bo_va
->valids
) {
2213 list_for_each_entry(mapping
, &bo_va
->invalids
, list
) {
2214 if (mapping
->start
== saddr
)
2218 if (&mapping
->list
== &bo_va
->invalids
)
2222 list_del(&mapping
->list
);
2223 amdgpu_vm_it_remove(mapping
, &vm
->va
);
2224 trace_amdgpu_vm_bo_unmap(bo_va
, mapping
);
2227 list_add(&mapping
->list
, &vm
->freed
);
2229 amdgpu_vm_free_mapping(adev
, vm
, mapping
,
2230 bo_va
->last_pt_update
);
/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, split them as appropriate.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			list_add(&before->list, &tmp->list);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += after->start - tmp->start;
			after->flags = tmp->flags;
			list_add(&after->list, &tmp->list);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;

		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(after);
	}

	return 0;
}
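
/*
 * Worked example (illustrative numbers): with an existing mapping covering
 * GPU pages [0x100, 0x1ff], clearing the range [0x140, 0x17f] moves the
 * clipped middle piece onto vm->freed for later invalidation and re-inserts
 * two remainders: "before" covering [0x100, 0x13f] with the original offset,
 * and "after" covering [0x180, 0x1ff] with the offset advanced by
 * (0x180 - 0x100), as computed above.
 */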
/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->base.vm;

	list_del(&bo_va->base.bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->base.vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}
/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *bo_base;

	list_for_each_entry(bo_base, &bo->va, bo_list) {
		spin_lock(&bo_base->vm->status_lock);
		if (list_empty(&bo_base->vm_status))
			list_add(&bo_base->vm_status,
				 &bo_base->vm->moved);
		spin_unlock(&bo_base->vm->status_lock);
	}
}
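
/*
 * Note: this only queues the BO's per-VM bo_va state on the "moved" list;
 * the page tables themselves are brought up to date later, when the VM is
 * validated and updated again before the next command submission.
 */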
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	   Above that split equal between PD and PTs */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}
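
/*
 * Example of the math above (illustrative): for a 64 GB VM, bits =
 * ilog2(64) + 18 = 24, so the block size becomes (24 + 3) / 2 = 13 and the
 * remaining 24 - 13 = 11 bits are covered by the page directory.  For an
 * 8 GB VM, bits = 21 and the block size of 21 - 9 = 12 leaves a 9 bit
 * (512 entry, 4K) page directory.
 */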
/**
 * amdgpu_vm_set_fragment_size - adjust fragment size in PTE
 *
 * @adev: amdgpu_device pointer
 * @fragment_size_default: the default fragment size if it's set auto
 */
void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
{
	if (amdgpu_vm_fragment_size == -1)
		adev->vm_manager.fragment_size = fragment_size_default;
	else
		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
}
/**
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
 * @vm_size: the default vm size if it's set auto
 * @fragment_size_default: the default fragment size if it's set auto
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
{
	/* adjust vm size first */
	if (amdgpu_vm_size == -1)
		adev->vm_manager.vm_size = vm_size;
	else
		adev->vm_manager.vm_size = amdgpu_vm_size;

	/* block size depends on vm size */
	if (amdgpu_vm_block_size == -1)
		adev->vm_manager.block_size =
			amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
	else
		adev->vm_manager.block_size = amdgpu_vm_block_size;

	amdgpu_vm_set_fragment_size(adev, fragment_size_default);

	DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
		 adev->vm_manager.vm_size, adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);
}
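
/*
 * Example (illustrative values): with the module parameters left at their
 * -1 "auto" defaults, a call like amdgpu_vm_adjust_size(adev, 64, 4) ends up
 * with a 64 GB VM, a block size taken from amdgpu_vm_get_block_size() and a
 * fragment size of 4.  Setting amdgpu.vm_size, amdgpu.vm_block_size or
 * amdgpu.vm_fragment_size on the kernel command line overrides the
 * corresponding value instead.
 */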
/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
				   AMDGPU_VM_PTE_COUNT(adev) * 8);
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int r, i;
	u64 flags;
	uint64_t init_pde_value = 0;

	vm->va = RB_ROOT;
	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entity for page table updates */
	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs);
	if (r)
		return r;

	vm->pte_support_ats = false;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_COMPUTE);

		if (adev->asic_type == CHIP_RAVEN) {
			vm->pte_support_ats = true;
			init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
		}
	} else {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_GFX);
	}
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
		  "CPU update of VM recommended only for large BAR system\n");
	vm->last_dir_update = NULL;

	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_VRAM_CLEARED;
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else
		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			  AMDGPU_GEM_CREATE_SHADOW);

	r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     flags,
			     NULL, NULL, init_pde_value, &vm->root.bo);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(vm->root.bo, false);
	if (r)
		goto error_free_root;

	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);

	if (vm->use_cpu_for_update) {
		r = amdgpu_bo_kmap(vm->root.bo, NULL);
		if (r)
			goto error_free_root;
	}

	amdgpu_bo_unreserve(vm->root.bo);

	return 0;

error_free_root:
	amdgpu_bo_unref(&vm->root.bo->shadow);
	amdgpu_bo_unref(&vm->root.bo);
	vm->root.bo = NULL;

error_free_sched_entity:
	amd_sched_entity_fini(&ring->sched, &vm->entity);

	return r;
}
/**
 * amdgpu_vm_free_levels - free PD/PT levels
 *
 * @level: PD/PT starting level to free
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
{
	unsigned i;

	if (level->bo) {
		amdgpu_bo_unref(&level->bo->shadow);
		amdgpu_bo_unref(&level->bo);
	}

	if (level->entries)
		for (i = 0; i <= level->last_entry_used; i++)
			amdgpu_vm_free_levels(&level->entries[i]);

	kvfree(level->entries);
}
/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
	int i;

	amd_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	amdgpu_vm_free_levels(&vm->root);
	dma_fence_put(vm->last_dir_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vm_free_reserved_vmid(adev, vm, i);
}
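
/*
 * Lifecycle sketch (illustrative only, not part of the driver): a VM
 * instance is typically created when a userspace client opens the device
 * and torn down again when the file is closed.  The helper below is a
 * made-up example of that pairing.
 */
#if 0	/* example only, never built */
static int example_vm_lifecycle(struct amdgpu_device *adev)
{
	struct amdgpu_vm vm;
	int r;

	r = amdgpu_vm_init(adev, &vm, AMDGPU_VM_CONTEXT_GFX);
	if (r)
		return r;

	/* ... map BOs and submit work against the VM here ... */

	amdgpu_vm_fini(adev, &vm);
	return 0;
}
#endif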
/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures.
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vm_reset_id(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	atomic64_set(&adev->vm_manager.client_counter, 0);
	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* If not overridden by the user, by default, only in large BAR systems
	 * Compute VM tables will be updated by CPU
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_vm_is_large_bar(adev))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif
}
/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VM; ++j) {
			struct amdgpu_vm_id *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
		}
	}
}
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently we only have a requirement to reserve a VMID
		 * from the gfxhub
		 */
		r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
						  AMDGPU_GFXHUB);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);