/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");

static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

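/*
 * Each golden-settings entry above is a (register offset, AND mask, OR value)
 * triple: amdgpu_program_register_sequence() clears the masked bits of the
 * register and then ORs in the new value, so every array size must be a
 * multiple of 3.
 */
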
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris11_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris10_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_stoney_common,
						 (const u32)ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

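/*
 * mc_stop()/mc_resume() bracket the soft-reset path later in this file:
 * gmc_v8_0_pre_soft_reset() blacks out the MC and blocks CPU framebuffer
 * access, and gmc_v8_0_post_soft_reset() undoes it once the SRBM reset has
 * settled.
 */
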
/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

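/*
 * The firmware file name built here matches the MODULE_FIRMWARE()
 * declarations at the top of this file ("amdgpu/<chip>_mc.bin"), so
 * initramfs tooling can pick the images up automatically.
 */
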
/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because hypervisor will do that
	 * for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

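/*
 * The io-debug payload parsed above is a list of (MC_SEQ_IO_DEBUG_INDEX,
 * MC_SEQ_IO_DEBUG_DATA) dword pairs, which is why regs_size divides
 * io_debug_size_bytes by 4 * 2. The MC_SEQ_SUP_CNTL values written during
 * the load (0x8/0x10 to reset, then 0x8/0x4/0x1 to re-arm) are an
 * undocumented sequencer handshake; only the inline comments describe
 * their intent.
 */
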
static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data, vbios_version;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because hypervisor will do that
	 * for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
	data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
	vbios_version = data & 0xf;

	if (vbios_version == 0)
		return 0;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

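/*
 * Unlike the tonga path, the polaris loader does not check whether the MC
 * sequencer is already running. It instead reads the vbios-programmed
 * version nibble from io-debug register 0x9F, skips the load entirely when
 * that nibble is zero, and polls bit 0x80 of MC_SEQ_MISC0 for train-done.
 */
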
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, base);
	amdgpu_gart_location(adev, mc);
}

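/*
 * MC_VM_FB_LOCATION holds the framebuffer base in its low 16 bits in units
 * of 16MB (hence the << 24 above), so the MC-visible VRAM base is always
 * 16MB aligned.
 */
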
/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
		tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->mc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
			chansize = 64;
		} else {
			chansize = 32;
		}
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->mc.vram_width = numchan * chansize;
	}
	/* Could aperture size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on vi */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->mc.aper_size = adev->mc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->mc.visible_vram_size = adev->mc.aper_size;
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		default:
			adev->mc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
			adev->mc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

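/*
 * Worked example of the PTE layout documented in
 * gmc_v8_0_gart_set_pte_pde() above: mapping physical page 0x12345000 as
 * valid + readable + writeable gives
 *   (0x12345000 & 0x000000FFFFFFF000ULL) | AMDGPU_PTE_VALID |
 *   AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE == 0x12345061,
 * assuming the generic AMDGPU_PTE_* bits (valid = bit 0, read = bit 5,
 * write = bit 6) match the VI layout, which the table above suggests
 * they do.
 */
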
static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static uint64_t gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
{
	BUG_ON(addr & 0xFFFFFF0000000FFFULL);
	return addr;
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->mc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->mc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn;

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

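/*
 * The PRT apertures are expressed in GPU pages. The disable path programs
 * LOW above HIGH (0xfffffff vs 0x0), which presumably leaves each aperture
 * empty rather than merely shrunk.
 */
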
/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp, field;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

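/*
 * Note the split register file for the per-VMID page table bases set up
 * above: contexts 1-7 live at consecutive offsets from
 * VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, while contexts 8-15 restart at
 * VM_CONTEXT8_PAGE_TABLE_BASE_ADDR.
 */
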
static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gart_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
	adev->mc.shared_aperture_end =
		adev->mc.shared_aperture_start + (4ULL << 30) - 1;
	adev->mc.private_aperture_start =
		adev->mc.shared_aperture_end + 1;
	adev->mc.private_aperture_end =
		adev->mc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if (adev->asic_type == CHIP_FIJI)
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 4);
	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	adev->mc.stolen_size = 256 * 1024;

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("amdgpu: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		pr_warn("amdgpu: No coherent DMA available\n");
	}

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.num_level = 1;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}

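/*
 * In gmc_v8_0_sw_init() above, vm_size is in GB after
 * amdgpu_vm_adjust_size(), and one GB is 1 << 18 4k pages, hence
 * max_pfn = vm_size << 18. The "4GB" figure in the comment there appears
 * to predate the 64GB default now passed to amdgpu_vm_adjust_size().
 */
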
static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v8_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);
	release_firmware(adev->mc.fw);
	adev->mc.fw = NULL;

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
		   adev->asic_type == CHIP_POLARIS10 ||
		   adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return 0;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->mc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->mc.srbm_soft_reset = 0;
		return false;
	}
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->mc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev);
	if (gmc_v8_0_wait_for_idle(adev)) {
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
	}

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->mc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->mc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

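/*
 * The reset above is a pulse: set the SOFT_RESET bits, read the register
 * back to post the write, wait ~50us, clear the bits, and read back again
 * before letting the MC settle.
 */
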
static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->mc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev);
	return 0;
}

static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
	}

	return 0;
}

static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

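/*
 * The light-sleep variant below walks the same nine CG registers but
 * toggles their MEM_LS_ENABLE bits instead of the clock-gating ENABLE
 * bits.
 */
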
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v8_0_get_vm_pde
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};