/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "amdgpu_atombios.h"
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};
static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};
static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};
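
/*
 * Each golden-settings table above is a flat list of (register offset,
 * AND mask, OR value) triples; amdgpu_program_register_sequence() applies
 * each one roughly as a read-modify-write:
 *
 *	tmp = RREG32(reg);
 *	tmp &= ~and_mask;
 *	tmp |= or_value;
 *	WREG32(reg, tmp);
 *
 * (with a straight replace when the AND mask is 0xffffffff), which is why
 * the ARRAY_SIZE() passed below is always a multiple of 3.
 */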
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris11_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris10_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_stoney_common,
						 (const u32)ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
			     struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}
static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
			       struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}
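
/*
 * gmc_v8_0_mc_stop()/gmc_v8_0_mc_resume() bracket MC reprogramming in
 * gmc_v8_0_mc_program() and in the soft-reset path: stop blacks out the
 * MC and blocks CPU framebuffer access through BIF_FB_EN so no client
 * touches VRAM while MC_VM_FB_LOCATION and the apertures change; resume
 * undoes both steps in reverse order.
 */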
/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}
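
/*
 * The name built above is resolved through the kernel's standard firmware
 * search path by request_firmware(), so on a typical distribution the file
 * ends up being read from e.g. /lib/firmware/amdgpu/tonga_mc.bin (the
 * exact location depends on how the distro packages linux-firmware).
 */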
/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because hypervisor will do that
	 * for this adaptor.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}
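
/*
 * A note on the header math used by both MC ucode loaders:
 * io_debug_size_bytes counts bytes of (index, data) register pairs, so
 * dividing by (4 * 2) yields the number of pairs pushed through the
 * MC_SEQ_IO_DEBUG_INDEX/DATA window, while ucode_size_bytes / 4 is the
 * dword count streamed into MC_SEQ_SUP_PGM.  Both streams are stored
 * little-endian in the firmware file, hence the le32_to_cpup() on every
 * word before it is written to the hardware.
 */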
static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data, vbios_version;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because hypervisor will do that
	 * for this adaptor.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
	data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
	vbios_version = data & 0xf;

	if (vbios_version == 0)
		return 0;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	data = RREG32(mmMC_SEQ_MISC0);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}
/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v8_0_mc_stop(adev, &save);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v8_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}
/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->mc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
			chansize = 64;
		} else {
			chansize = 32;
		}
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->mc.vram_width = numchan * chansize;
	}
	/* Could aper size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on si */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->mc.aper_size = adev->mc.real_vram_size;
	}

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->mc.visible_vram_size = adev->mc.aper_size;
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user had overridden it, set the gart
	 * size equal to 1024M or the vram size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
					adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
/*
 * GART
 * VMID 0 is the physical GPU address space as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
/**
 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
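
/*
 * GART entries are 8 bytes each, so entry gpu_page_idx sits at byte
 * offset gpu_page_idx * 8 in the table.  For example, mapping a 4K page
 * at bus address 0x12345000 with access flags F writes the single
 * 64-bit value (0x12345000 & 0x000000FFFFFFF000ULL) | F.
 */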
static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}
/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->mc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->mc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn;

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}
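
/*
 * The PRT apertures are programmed in GPU-page units: when enabled they
 * span everything from the reserved VA area up to max_pfn, and when
 * disabled the LOW_ADDR (0xfffffff) is deliberately set above the
 * HIGH_ADDR (0x0), so each aperture matches no address at all.
 */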
/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
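
/*
 * The PAGE_TABLE_BLOCK_SIZE field appears to encode log2(pages per
 * block) relative to a 512-page (2 MB) minimum block, which is why
 * adev->vm_manager.block_size (a log2 page count) has 9 subtracted
 * before it is written to VM_CONTEXT1_CNTL above.
 */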
static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}
/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}
/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}
/*
 * vm
 * VMID 0 is the physical GPU address space as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_vm_init - cik vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits cik specific vm parameters (number of VMs, base of vram for
 * vm allocation).
 * Returns 0 for success.
 */
static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.num_level = 1;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}
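
/*
 * MC_VM_FB_OFFSET appears to report the APU carve-out base in 4 MB
 * units, hence the shift left by 22 to get a byte address; the same
 * conversion is used for the aperture base in gmc_v8_0_mc_init().
 */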
/**
 * gmc_v8_0_vm_fini - cik vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 */
static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
{
}
/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}
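
/*
 * VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT packs a four-character ASCII
 * client tag into one 32-bit register, most significant byte first;
 * block[] above simply unpacks it into a printable NUL-terminated
 * string for the fault message.
 */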
static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}
static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gart_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
	adev->mc.shared_aperture_end =
		adev->mc.shared_aperture_start + (4ULL << 30) - 1;
	adev->mc.private_aperture_start =
		adev->mc.shared_aperture_end + 1;
	adev->mc.private_aperture_end =
		adev->mc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}
static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}
#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if (adev->asic_type == CHIP_FIJI)
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64);
	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("amdgpu: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		pr_warn("amdgpu: No coherent DMA available\n");
	}

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return 0;
}
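
/*
 * On the max_pfn arithmetic above: amdgpu_vm_adjust_size() leaves
 * adev->vm_manager.vm_size in GB, and 1 GB covers 2^18 4K pages, so
 * vm_size << 18 converts the VM size into a page-frame count.
 */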
static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}
static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
		   adev->asic_type == CHIP_POLARIS10 ||
		   adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);

	return r;
}
static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}
static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}
static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}
static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}
static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->mc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->mc.srbm_soft_reset = 0;
		return false;
	}
}
static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->mc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev, &adev->mc.save);
	if (gmc_v8_0_wait_for_idle(adev)) {
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
	}

	return 0;
}
static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->mc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->mc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}
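
/*
 * Note the RREG32(mmSRBM_SOFT_RESET) immediately after each write above:
 * the read-back forces the posted MMIO write to land before the delay
 * starts, a common ordering pattern in these reset sequences.
 */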
static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->mc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev, &adev->mc.save);

	return 0;
}
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VM context1 */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VM context1 */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
			addr);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
	}

	return 0;
}
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}
static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}
static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}
static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};
static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags
};
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
}
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}
const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};
const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};
const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};