/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/io-64-nonatomic-lo-hi.h>

#include "amdgpu.h"
#include "amdgpu_gmc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/**
 * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
 *
 * @bo: the BO to get the PDE for
 * @level: the level in the PD hierarchy
 * @addr: resulting addr
 * @flags: resulting flags
 *
 * Get the address and flags to be used for a PDE (Page Directory Entry).
 */
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
		*addr = bo->tbo.ttm->dma_address[0];
		break;
	case TTM_PL_VRAM:
		*addr = amdgpu_bo_gpu_offset(bo);
		break;
	default:
		*addr = 0;
		break;
	}
	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
}
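
/*
 * Usage sketch (hypothetical caller, identifiers are illustrative): when the
 * VM code links a page-table BO into its parent directory, it can fetch the
 * entry contents like this and then write "pde | flags" into the parent:
 *
 *	uint64_t pde, flags;
 *
 *	amdgpu_gmc_get_pde_for_bo(pt_bo, 1, &pde, &flags);
 */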

/*
 * amdgpu_gmc_pd_addr - return the address of the root directory
 */
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t pd_addr;

	/* TODO: move that into ASIC specific code */
	if (adev->asic_type >= CHIP_VEGA10) {
		uint64_t flags = AMDGPU_PTE_VALID;

		amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
		pd_addr |= flags;
	} else {
		pd_addr = amdgpu_bo_gpu_offset(bo);
	}
	return pd_addr;
}
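
/*
 * Usage sketch (hypothetical caller): the root directory address is what a
 * VM flush programs into a VMID's page-table base registers, e.g.:
 *
 *	u64 pd_addr = amdgpu_gmc_pd_addr(vm_root_bo);
 *
 * where vm_root_bo stands for the root page-directory BO of a VM.
 */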

/**
 * amdgpu_gmc_set_pte_pde - update the page tables using CPU
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using CPU.
 */
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
			   uint32_t gpu_page_idx, uint64_t addr,
			   uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}
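
/*
 * Usage sketch (hypothetical caller, identifiers are illustrative): the GART
 * code can fill a range of entries with this helper, one 8-byte PTE per
 * system page:
 *
 *	for (i = 0; i < num_pages; i++)
 *		amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
 *				       first_idx + i, dma_addr[i], pte_flags);
 */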

/**
 * amdgpu_gmc_agp_addr - return the address in the AGP address space
 *
 * @bo: TTM BO which needs the address, must be in GTT domain
 *
 * Tries to figure out how to access the BO through the AGP aperture. Returns
 * AMDGPU_BO_INVALID_OFFSET if that is not possible.
 */
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);

	if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
		return AMDGPU_BO_INVALID_OFFSET;

	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
		return AMDGPU_BO_INVALID_OFFSET;

	return adev->gmc.agp_start + bo->ttm->dma_address[0];
}
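
/*
 * Worked example (numbers for illustration only): with agp_start at
 * 0x7fff00000000 and a single uncached page at DMA address 0x1000, the BO
 * becomes visible to the GPU at 0x7fff00001000, provided 0x1000 + PAGE_SIZE
 * still fits below agp_size. Multi-page BOs are rejected because their pages
 * are not guaranteed to be contiguous in system memory, and CPU-cached BOs
 * because accesses through the aperture are not cache coherent.
 */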

/**
 * amdgpu_gmc_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;

	if (mc->xgmi.num_physical_nodes == 0) {
		mc->fb_start = mc->vram_start;
		mc->fb_end = mc->vram_end;
	}
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gmc_gart_location - try to find GART location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GART before or after VRAM.
 *
 * If the GART size is bigger than the space left, then we adjust the GART
 * size. Thus this function will never fail.
 */
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t four_gb = 0x100000000ULL;
	u64 size_af, size_bf;
	/* To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START */
	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);

	mc->gart_size += adev->pm.smu_prv_buffer_size;

	/* VCE doesn't like it when BOs cross a 4GB segment, so align
	 * the GART base on a 4GB boundary as well.
	 */
	size_bf = mc->fb_start;
	size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);

	if (mc->gart_size > max(size_bf, size_af)) {
		dev_warn(adev->dev, "limiting GART\n");
		mc->gart_size = max(size_bf, size_af);
	}

	if ((size_bf >= mc->gart_size && size_bf < size_af) ||
	    (size_af < mc->gart_size))
		mc->gart_start = 0;
	else
		mc->gart_start = max_mc_address - mc->gart_size + 1;

	mc->gart_start &= ~(four_gb - 1);
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
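
/*
 * Worked example (numbers for illustration only): with 4 GB of VRAM at base
 * 0 (fb_start = 0, fb_end = 0xFFFFFFFF) there is no room before the FB, so
 * size_bf = 0 and the GART goes after it: a 512 MB GART ends up at
 * max_mc_address - 512M + 1, rounded down to a 4 GB boundary. If neither
 * hole can hold the requested size, the GART is shrunk to fit the larger
 * hole instead of failing.
 */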

/**
 * amdgpu_gmc_agp_location - try to find AGP location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to find a place for the AGP BAR in the MC address
 * space.
 *
 * AGP BAR will be assigned the largest available hole in the address space.
 * Should be called after VRAM and GART locations are setup.
 */
void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t sixteen_gb = 1ULL << 34;
	const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
	u64 size_af, size_bf;

	if (amdgpu_sriov_vf(adev)) {
		mc->agp_start = 0xffffffffffff;
		mc->agp_end = 0x0;
		mc->agp_size = 0;

		return;
	}

	if (mc->fb_start > mc->gart_start) {
		size_bf = (mc->fb_start & sixteen_gb_mask) -
			ALIGN(mc->gart_end + 1, sixteen_gb);
		size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
	} else {
		size_bf = mc->fb_start & sixteen_gb_mask;
		size_af = (mc->gart_start & sixteen_gb_mask) -
			ALIGN(mc->fb_end + 1, sixteen_gb);
	}

	if (size_bf > size_af) {
		mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
		mc->agp_size = size_bf;
	} else {
		mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
		mc->agp_size = size_af;
	}

	mc->agp_end = mc->agp_start + mc->agp_size - 1;
	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
			mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}
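
/*
 * Usage sketch (hypothetical caller): a GMC IP block typically carves up the
 * MC address space in this order during hardware init, since the AGP hole
 * can only be chosen once VRAM and GART are placed:
 *
 *	amdgpu_gmc_vram_location(adev, mc, base);
 *	amdgpu_gmc_gart_location(adev, mc);
 *	amdgpu_gmc_agp_location(adev, mc);
 */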

/**
 * amdgpu_gmc_filter_faults - filter VM faults
 *
 * @adev: amdgpu device structure
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 * @timestamp: timestamp of the fault
 *
 * Returns:
 * True if the fault was filtered and should not be processed further.
 * False if the fault is a new one and needs to be handled.
 */
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
			      uint16_t pasid, uint64_t timestamp)
{
	struct amdgpu_gmc *gmc = &adev->gmc;

	uint64_t stamp, key = addr << 4 | pasid;
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;

	/* If we don't have space left in the ring buffer return immediately */
	stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
		AMDGPU_GMC_FAULT_TIMEOUT;
	if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
		return true;

	/* Try to find the fault in the hash */
	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	while (fault->timestamp >= stamp) {
		uint64_t tmp;

		if (fault->key == key)
			return true;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];

		/* Check if the entry was reused */
		if (fault->timestamp >= tmp)
			break;
	}

	/* Add the fault to the ring */
	fault = &gmc->fault_ring[gmc->last_fault];
	fault->key = key;
	fault->timestamp = timestamp;

	/* And update the hash */
	fault->next = gmc->fault_hash[hash].idx;
	gmc->fault_hash[hash].idx = gmc->last_fault++;
	return false;
}
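
/*
 * Usage sketch (hypothetical interrupt handler): fault filtering is meant to
 * sit at the front of the VM fault path so that a retry-fault storm for one
 * page only gets reported and handled once:
 *
 *	if (amdgpu_gmc_filter_faults(adev, fault_addr, pasid, timestamp))
 *		return;		(duplicate within the timeout window, drop it)
 *
 * The key packs the faulting address and PASID together, so the same address
 * faulting from two different processes is tracked separately.
 */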

int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
		r = adev->umc.funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) {
		r = adev->mmhub.funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	return amdgpu_xgmi_ras_late_init(adev);
}

void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
{
	amdgpu_umc_ras_fini(adev);
	amdgpu_mmhub_ras_fini(adev);
	amdgpu_xgmi_ras_fini(adev);
}

	/*
	 * The latest engine allocation on gfx9/10 is:
	 * Engine 2, 3: firmware
	 * Engine 0, 1, 4~16: amdgpu ring,
	 * subject to change when ring number changes
	 * Engine 17: GART flushes
	 */
#define GFXHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3
#define MMHUB_FREE_VM_INV_ENGS_BITMAP		0x1FFF3

int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		if (ring == &adev->mes.ring)
			continue;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}
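
/*
 * Worked example (for illustration): the free-engine bitmap 0x1FFF3 is
 * 0b1_1111_1111_1111_0011, i.e. engines 0, 1 and 4..16 are allocatable while
 * engines 2, 3 (firmware) and 17 (GART flushes) stay reserved. ffs() returns
 * the 1-based index of the lowest set bit, so the first ring on a hub gets
 * engine 0, the next one engine 1, then engine 4, and so on as the bits are
 * cleared.
 */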

/**
 * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
 * @adev: amdgpu_device pointer
 *
 * Check and set if the device @adev supports Trusted Memory
 * Zones (TMZ).
 */
void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_VANGOGH:
		/* Don't enable it by default yet.
		 */
		if (amdgpu_tmz < 1) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
		}
		break;
	default:
		adev->gmc.tmz_enabled = false;
		dev_warn(adev->dev,
			 "Trusted Memory Zone (TMZ) feature not supported\n");
		break;
	}
}

/**
 * amdgpu_gmc_noretry_set -- set per-ASIC noretry defaults
 * @adev: amdgpu_device pointer
 *
 * Set a per-ASIC default for the no-retry parameter.
 */
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
	struct amdgpu_gmc *gmc = &adev->gmc;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
		/*
		 * noretry = 0 will cause kfd page fault tests fail
		 * for some ASICs, so set default to 1 for these ASICs.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 1;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	case CHIP_RAVEN:
	default:
		/* Raven currently has issues with noretry
		 * regardless of what we decide for other
		 * asics, we should leave raven with
		 * noretry = 0 until we root cause the
		 * issues.
		 *
		 * default this to 0 for now, but we may want
		 * to change this in the future for certain
		 * GPUs as it can increase performance in
		 * certain cases.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 0;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	}
}

void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
				   bool enable)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, i;

	hub = &adev->vmhub[hub_type];
	for (i = 0; i < 16; i++) {
		reg = hub->vm_context0_cntl + hub->ctx_distance * i;

		tmp = RREG32(reg);
		if (enable)
			tmp |= hub->vm_cntx_cntl_vm_fault;
		else
			tmp &= ~hub->vm_cntx_cntl_vm_fault;

		WREG32(reg, tmp);
	}
}
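
/*
 * Usage sketch (hypothetical caller): under SR-IOV the host may want fault
 * reporting toggled for every VM context on a hub at once, e.g.:
 *
 *	amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
 *
 * which clears the vm_cntx_cntl_vm_fault bits in all 16 VM_CONTEXT*_CNTL
 * registers of the GFX hub.
 */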

void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
{
	unsigned int size;

	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to first 8M of VRAM on S3 resume,
	 * this overrides GART which by default gets placed in first 8M and
	 * causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->mman.keep_stolen_vga_memory = true;
		break;
	default:
		adev->mman.keep_stolen_vga_memory = false;
		break;
	}

	if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE))
		size = 0;
	else
		size = amdgpu_gmc_get_vbios_fb_size(adev);

	/* set to 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		size = 0;

	if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
		adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
		adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
	} else {
		adev->mman.stolen_vga_size = size;
		adev->mman.stolen_extended_size = 0;
	}
}
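
/*
 * Worked example (numbers for illustration only): if the pre-OS framebuffer
 * reported by the vbios is 16 MB and AMDGPU_VBIOS_VGA_ALLOCATION is smaller,
 * the first AMDGPU_VBIOS_VGA_ALLOCATION bytes are kept as stolen VGA memory
 * and the remainder becomes the extended stolen reservation; a pre-OS buffer
 * that would leave less than 8 MB of VRAM free is not reserved at all.
 */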