/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
23 #include <linux/firmware.h>
27 #include "vega10/soc15ip.h"
28 #include "vega10/HDP/hdp_4_0_offset.h"
29 #include "vega10/HDP/hdp_4_0_sh_mask.h"
30 #include "vega10/GC/gc_9_0_sh_mask.h"
31 #include "vega10/vega10_enum.h"
33 #include "soc15_common.h"
35 #include "nbio_v6_1.h"
36 #include "gfxhub_v1_0.h"
37 #include "mmhub_v1_0.h"
/* The DF (data fabric) register block is not covered by the generated
 * vega10 headers yet, so DramBaseAddress0 and its fields are defined
 * locally.  Used by gmc_v9_0_mc_init() to derive the HBM channel count. */
#define mmDF_CS_AON0_DramBaseAddress0						0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX					0
/* DF_CS_AON0_DramBaseAddress0 field shifts and masks */
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT				0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT			0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT			0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT			0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT			0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK				0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK			0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK				0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK				0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK				0xFFFFF000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8
56 static const u32 golden_settings_vega10_hdp
[] =
58 0xf64, 0x0fffffff, 0x00000000,
59 0xf65, 0x0fffffff, 0x00000000,
60 0xf66, 0x0fffffff, 0x00000000,
61 0xf67, 0x0fffffff, 0x00000000,
62 0xf68, 0x0fffffff, 0x00000000,
63 0xf6a, 0x0fffffff, 0x00000000,
64 0xf6b, 0x0fffffff, 0x00000000,
65 0xf6c, 0x0fffffff, 0x00000000,
66 0xf6d, 0x0fffffff, 0x00000000,
67 0xf6e, 0x0fffffff, 0x00000000,
70 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device
*adev
,
71 struct amdgpu_irq_src
*src
,
73 enum amdgpu_interrupt_state state
)
75 struct amdgpu_vmhub
*hub
;
76 u32 tmp
, reg
, bits
, i
;
78 bits
= VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
|
79 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
|
80 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
|
81 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
|
82 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
|
83 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
|
84 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
;
87 case AMDGPU_IRQ_STATE_DISABLE
:
89 hub
= &adev
->vmhub
[AMDGPU_MMHUB
];
90 for (i
= 0; i
< 16; i
++) {
91 reg
= hub
->vm_context0_cntl
+ i
;
98 hub
= &adev
->vmhub
[AMDGPU_GFXHUB
];
99 for (i
= 0; i
< 16; i
++) {
100 reg
= hub
->vm_context0_cntl
+ i
;
106 case AMDGPU_IRQ_STATE_ENABLE
:
108 hub
= &adev
->vmhub
[AMDGPU_MMHUB
];
109 for (i
= 0; i
< 16; i
++) {
110 reg
= hub
->vm_context0_cntl
+ i
;
117 hub
= &adev
->vmhub
[AMDGPU_GFXHUB
];
118 for (i
= 0; i
< 16; i
++) {
119 reg
= hub
->vm_context0_cntl
+ i
;
132 static int gmc_v9_0_process_interrupt(struct amdgpu_device
*adev
,
133 struct amdgpu_irq_src
*source
,
134 struct amdgpu_iv_entry
*entry
)
136 struct amdgpu_vmhub
*hub
= &adev
->vmhub
[entry
->vm_id_src
];
140 addr
= (u64
)entry
->src_data
[0] << 12;
141 addr
|= ((u64
)entry
->src_data
[1] & 0xf) << 44;
143 if (!amdgpu_sriov_vf(adev
)) {
144 status
= RREG32(hub
->vm_l2_pro_fault_status
);
145 WREG32_P(hub
->vm_l2_pro_fault_cntl
, 1, ~1);
148 if (printk_ratelimit()) {
150 "[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
151 entry
->vm_id_src
? "mmhub" : "gfxhub",
152 entry
->src_id
, entry
->ring_id
, entry
->vm_id
,
154 dev_err(adev
->dev
, " at page 0x%016llx from %d\n",
155 addr
, entry
->client_id
);
156 if (!amdgpu_sriov_vf(adev
))
158 "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
165 static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs
= {
166 .set
= gmc_v9_0_vm_fault_interrupt_state
,
167 .process
= gmc_v9_0_process_interrupt
,
170 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device
*adev
)
172 adev
->mc
.vm_fault
.num_types
= 1;
173 adev
->mc
.vm_fault
.funcs
= &gmc_v9_0_irq_funcs
;
176 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id
)
180 /* invalidate using legacy mode on vm_id*/
181 req
= REG_SET_FIELD(req
, VM_INVALIDATE_ENG0_REQ
,
182 PER_VMID_INVALIDATE_REQ
, 1 << vm_id
);
183 req
= REG_SET_FIELD(req
, VM_INVALIDATE_ENG0_REQ
, FLUSH_TYPE
, 0);
184 req
= REG_SET_FIELD(req
, VM_INVALIDATE_ENG0_REQ
, INVALIDATE_L2_PTES
, 1);
185 req
= REG_SET_FIELD(req
, VM_INVALIDATE_ENG0_REQ
, INVALIDATE_L2_PDE0
, 1);
186 req
= REG_SET_FIELD(req
, VM_INVALIDATE_ENG0_REQ
, INVALIDATE_L2_PDE1
, 1);
187 req
= REG_SET_FIELD(req
, VM_INVALIDATE_ENG0_REQ
, INVALIDATE_L2_PDE2
, 1);
188 req
= REG_SET_FIELD(req
, VM_INVALIDATE_ENG0_REQ
, INVALIDATE_L1_PTES
, 1);
189 req
= REG_SET_FIELD(req
, VM_INVALIDATE_ENG0_REQ
,
190 CLEAR_PROTECTION_FAULT_STATUS_ADDR
, 0);
197 * VMID 0 is the physical GPU addresses as used by the kernel.
198 * VMIDs 1-15 are used for userspace clients and are handled
199 * by the amdgpu vm/hsa code.
203 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
205 * @adev: amdgpu_device pointer
206 * @vmid: vm instance to flush
208 * Flush the TLB for the requested page table.
210 static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device
*adev
,
213 /* Use register 17 for GART */
214 const unsigned eng
= 17;
217 /* flush hdp cache */
218 nbio_v6_1_hdp_flush(adev
);
220 spin_lock(&adev
->mc
.invalidate_lock
);
222 for (i
= 0; i
< AMDGPU_MAX_VMHUBS
; ++i
) {
223 struct amdgpu_vmhub
*hub
= &adev
->vmhub
[i
];
224 u32 tmp
= gmc_v9_0_get_invalidate_req(vmid
);
226 WREG32_NO_KIQ(hub
->vm_inv_eng0_req
+ eng
, tmp
);
228 /* Busy wait for ACK.*/
229 for (j
= 0; j
< 100; j
++) {
230 tmp
= RREG32_NO_KIQ(hub
->vm_inv_eng0_ack
+ eng
);
239 /* Wait for ACK with a delay.*/
240 for (j
= 0; j
< adev
->usec_timeout
; j
++) {
241 tmp
= RREG32_NO_KIQ(hub
->vm_inv_eng0_ack
+ eng
);
247 if (j
< adev
->usec_timeout
)
250 DRM_ERROR("Timeout waiting for VM flush ACK!\n");
253 spin_unlock(&adev
->mc
.invalidate_lock
);
257 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
259 * @adev: amdgpu_device pointer
260 * @cpu_pt_addr: cpu address of the page table
261 * @gpu_page_idx: entry in the page table to update
262 * @addr: dst addr to write into pte/pde
263 * @flags: access flags
265 * Update the page tables using the CPU.
267 static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device
*adev
,
269 uint32_t gpu_page_idx
,
273 void __iomem
*ptr
= (void *)cpu_pt_addr
;
277 * PTE format on VEGA 10:
286 * 47:12 4k physical page base address
296 * PDE format on VEGA 10:
297 * 63:59 block fragment size
301 * 47:6 physical base address of PD or PTE
309 * The following is for PTE only. GART does not have PDEs.
311 value
= addr
& 0x0000FFFFFFFFF000ULL
;
313 writeq(value
, ptr
+ (gpu_page_idx
* 8));
317 static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device
*adev
,
321 uint64_t pte_flag
= 0;
323 if (flags
& AMDGPU_VM_PAGE_EXECUTABLE
)
324 pte_flag
|= AMDGPU_PTE_EXECUTABLE
;
325 if (flags
& AMDGPU_VM_PAGE_READABLE
)
326 pte_flag
|= AMDGPU_PTE_READABLE
;
327 if (flags
& AMDGPU_VM_PAGE_WRITEABLE
)
328 pte_flag
|= AMDGPU_PTE_WRITEABLE
;
330 switch (flags
& AMDGPU_VM_MTYPE_MASK
) {
331 case AMDGPU_VM_MTYPE_DEFAULT
:
332 pte_flag
|= AMDGPU_PTE_MTYPE(MTYPE_NC
);
334 case AMDGPU_VM_MTYPE_NC
:
335 pte_flag
|= AMDGPU_PTE_MTYPE(MTYPE_NC
);
337 case AMDGPU_VM_MTYPE_WC
:
338 pte_flag
|= AMDGPU_PTE_MTYPE(MTYPE_WC
);
340 case AMDGPU_VM_MTYPE_CC
:
341 pte_flag
|= AMDGPU_PTE_MTYPE(MTYPE_CC
);
343 case AMDGPU_VM_MTYPE_UC
:
344 pte_flag
|= AMDGPU_PTE_MTYPE(MTYPE_UC
);
347 pte_flag
|= AMDGPU_PTE_MTYPE(MTYPE_NC
);
351 if (flags
& AMDGPU_VM_PAGE_PRT
)
352 pte_flag
|= AMDGPU_PTE_PRT
;
357 static u64
gmc_v9_0_adjust_mc_addr(struct amdgpu_device
*adev
, u64 mc_addr
)
359 return adev
->vm_manager
.vram_base_offset
+ mc_addr
- adev
->mc
.vram_start
;
362 static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs
= {
363 .flush_gpu_tlb
= gmc_v9_0_gart_flush_gpu_tlb
,
364 .set_pte_pde
= gmc_v9_0_gart_set_pte_pde
,
365 .get_vm_pte_flags
= gmc_v9_0_get_vm_pte_flags
,
366 .adjust_mc_addr
= gmc_v9_0_adjust_mc_addr
,
367 .get_invalidate_req
= gmc_v9_0_get_invalidate_req
,
370 static void gmc_v9_0_set_gart_funcs(struct amdgpu_device
*adev
)
372 if (adev
->gart
.gart_funcs
== NULL
)
373 adev
->gart
.gart_funcs
= &gmc_v9_0_gart_funcs
;
/* IP-block early init: register the GART and IRQ callback tables. */
static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	return 0;
}
386 static int gmc_v9_0_late_init(void *handle
)
388 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
389 return amdgpu_irq_get(adev
, &adev
->mc
.vm_fault
, 0);
392 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device
*adev
,
393 struct amdgpu_mc
*mc
)
396 if (!amdgpu_sriov_vf(adev
))
397 base
= mmhub_v1_0_get_fb_location(adev
);
398 amdgpu_vram_location(adev
, &adev
->mc
, base
);
399 adev
->mc
.gtt_base_align
= 0;
400 amdgpu_gtt_location(adev
, mc
);
404 * gmc_v9_0_mc_init - initialize the memory controller driver params
406 * @adev: amdgpu_device pointer
408 * Look up the amount of vram, vram width, and decide how to place
409 * vram and gart within the GPU's physical address space.
410 * Returns 0 for success.
412 static int gmc_v9_0_mc_init(struct amdgpu_device
*adev
)
415 int chansize
, numchan
;
417 /* hbm memory channel size */
420 tmp
= RREG32(SOC15_REG_OFFSET(DF
, 0, mmDF_CS_AON0_DramBaseAddress0
));
421 tmp
&= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK
;
422 tmp
>>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT
;
453 adev
->mc
.vram_width
= numchan
* chansize
;
455 /* Could aper size report 0 ? */
456 adev
->mc
.aper_base
= pci_resource_start(adev
->pdev
, 0);
457 adev
->mc
.aper_size
= pci_resource_len(adev
->pdev
, 0);
458 /* size in MB on si */
459 adev
->mc
.mc_vram_size
=
460 nbio_v6_1_get_memsize(adev
) * 1024ULL * 1024ULL;
461 adev
->mc
.real_vram_size
= adev
->mc
.mc_vram_size
;
462 adev
->mc
.visible_vram_size
= adev
->mc
.aper_size
;
464 /* In case the PCI BAR is larger than the actual amount of vram */
465 if (adev
->mc
.visible_vram_size
> adev
->mc
.real_vram_size
)
466 adev
->mc
.visible_vram_size
= adev
->mc
.real_vram_size
;
468 /* unless the user had overridden it, set the gart
469 * size equal to the 1024 or vram, whichever is larger.
471 if (amdgpu_gart_size
== -1)
472 adev
->mc
.gtt_size
= max((1024ULL << 20), adev
->mc
.mc_vram_size
);
474 adev
->mc
.gtt_size
= (uint64_t)amdgpu_gart_size
<< 20;
476 gmc_v9_0_vram_gtt_location(adev
, &adev
->mc
);
481 static int gmc_v9_0_gart_init(struct amdgpu_device
*adev
)
485 if (adev
->gart
.robj
) {
486 WARN(1, "VEGA10 PCIE GART already initialized\n");
489 /* Initialize common gart structure */
490 r
= amdgpu_gart_init(adev
);
493 adev
->gart
.table_size
= adev
->gart
.num_gpu_pages
* 8;
494 adev
->gart
.gart_pte_flags
= AMDGPU_PTE_MTYPE(MTYPE_UC
) |
495 AMDGPU_PTE_EXECUTABLE
;
496 return amdgpu_gart_table_vram_alloc(adev
);
501 * VMID 0 is the physical GPU addresses as used by the kernel.
502 * VMIDs 1-15 are used for userspace clients and are handled
503 * by the amdgpu vm/hsa code.
506 * gmc_v9_0_vm_init - vm init callback
508 * @adev: amdgpu_device pointer
510 * Inits vega10 specific vm parameters (number of VMs, base of vram for
511 * VMIDs 1-15) (vega10).
512 * Returns 0 for success.
514 static int gmc_v9_0_vm_init(struct amdgpu_device
*adev
)
518 * VMID 0 is reserved for System
519 * amdgpu graphics/compute will use VMIDs 1-7
520 * amdkfd will use VMIDs 8-15
522 adev
->vm_manager
.num_ids
= AMDGPU_NUM_OF_VMIDS
;
524 /* TODO: fix num_level for APU when updating vm size and block size */
525 if (adev
->flags
& AMD_IS_APU
)
526 adev
->vm_manager
.num_level
= 1;
528 adev
->vm_manager
.num_level
= 3;
529 amdgpu_vm_manager_init(adev
);
531 /* base offset of vram pages */
532 /*XXX This value is not zero for APU*/
533 adev
->vm_manager
.vram_base_offset
= 0;
/**
 * gmc_v9_0_vm_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup.
 */
static void gmc_v9_0_vm_fini(struct amdgpu_device *adev)
{
	/* nothing asic-specific to tear down */
	return;
}
550 static int gmc_v9_0_sw_init(void *handle
)
554 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
556 spin_lock_init(&adev
->mc
.invalidate_lock
);
558 if (adev
->flags
& AMD_IS_APU
) {
559 adev
->mc
.vram_type
= AMDGPU_VRAM_TYPE_UNKNOWN
;
560 amdgpu_vm_adjust_size(adev
, 64);
562 /* XXX Don't know how to get VRAM type yet. */
563 adev
->mc
.vram_type
= AMDGPU_VRAM_TYPE_HBM
;
565 * To fulfill 4-level page support,
566 * vm size is 256TB (48bit), maximum size of Vega10,
567 * block size 512 (9bit)
569 adev
->vm_manager
.vm_size
= 1U << 18;
570 adev
->vm_manager
.block_size
= 9;
571 DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
572 adev
->vm_manager
.vm_size
,
573 adev
->vm_manager
.block_size
);
576 /* This interrupt is VMC page fault.*/
577 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_VMC
, 0,
579 r
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_UTCL2
, 0,
585 adev
->vm_manager
.max_pfn
= adev
->vm_manager
.vm_size
<< 18;
587 /* Set the internal MC address mask
588 * This is the max address of the GPU's
589 * internal address space.
591 adev
->mc
.mc_mask
= 0xffffffffffffULL
; /* 48 bit MC */
593 /* set DMA mask + need_dma32 flags.
594 * PCIE - can handle 44-bits.
595 * IGP - can handle 44-bits
596 * PCI - dma32 for legacy pci gart, 44 bits on vega10
598 adev
->need_dma32
= false;
599 dma_bits
= adev
->need_dma32
? 32 : 44;
600 r
= pci_set_dma_mask(adev
->pdev
, DMA_BIT_MASK(dma_bits
));
602 adev
->need_dma32
= true;
604 printk(KERN_WARNING
"amdgpu: No suitable DMA available.\n");
606 r
= pci_set_consistent_dma_mask(adev
->pdev
, DMA_BIT_MASK(dma_bits
));
608 pci_set_consistent_dma_mask(adev
->pdev
, DMA_BIT_MASK(32));
609 printk(KERN_WARNING
"amdgpu: No coherent DMA available.\n");
612 r
= gmc_v9_0_mc_init(adev
);
617 r
= amdgpu_bo_init(adev
);
621 r
= gmc_v9_0_gart_init(adev
);
625 if (!adev
->vm_manager
.enabled
) {
626 r
= gmc_v9_0_vm_init(adev
);
628 dev_err(adev
->dev
, "vm manager initialization failed (%d).\n", r
);
631 adev
->vm_manager
.enabled
= true;
/**
 * gmc_v9_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}
649 static int gmc_v9_0_sw_fini(void *handle
)
651 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
653 if (adev
->vm_manager
.enabled
) {
654 amdgpu_vm_manager_fini(adev
);
655 gmc_v9_0_vm_fini(adev
);
656 adev
->vm_manager
.enabled
= false;
658 gmc_v9_0_gart_fini(adev
);
659 amdgpu_gem_force_release(adev
);
660 amdgpu_bo_fini(adev
);
665 static void gmc_v9_0_init_golden_registers(struct amdgpu_device
*adev
)
667 switch (adev
->asic_type
) {
676 * gmc_v9_0_gart_enable - gart enable
678 * @adev: amdgpu_device pointer
680 static int gmc_v9_0_gart_enable(struct amdgpu_device
*adev
)
686 amdgpu_program_register_sequence(adev
,
687 golden_settings_vega10_hdp
,
688 (const u32
)ARRAY_SIZE(golden_settings_vega10_hdp
));
690 if (adev
->gart
.robj
== NULL
) {
691 dev_err(adev
->dev
, "No VRAM object for PCIE GART.\n");
694 r
= amdgpu_gart_table_vram_pin(adev
);
698 /* After HDP is initialized, flush HDP.*/
699 nbio_v6_1_hdp_flush(adev
);
701 r
= gfxhub_v1_0_gart_enable(adev
);
705 r
= mmhub_v1_0_gart_enable(adev
);
709 tmp
= RREG32(SOC15_REG_OFFSET(HDP
, 0, mmHDP_MISC_CNTL
));
710 tmp
|= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK
;
711 WREG32(SOC15_REG_OFFSET(HDP
, 0, mmHDP_MISC_CNTL
), tmp
);
713 tmp
= RREG32(SOC15_REG_OFFSET(HDP
, 0, mmHDP_HOST_PATH_CNTL
));
714 WREG32(SOC15_REG_OFFSET(HDP
, 0, mmHDP_HOST_PATH_CNTL
), tmp
);
717 if (amdgpu_vm_fault_stop
== AMDGPU_VM_FAULT_STOP_ALWAYS
)
722 gfxhub_v1_0_set_fault_enable_default(adev
, value
);
723 mmhub_v1_0_set_fault_enable_default(adev
, value
);
725 gmc_v9_0_gart_flush_gpu_tlb(adev
, 0);
727 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
728 (unsigned)(adev
->mc
.gtt_size
>> 20),
729 (unsigned long long)adev
->gart
.table_addr
);
730 adev
->gart
.ready
= true;
/* Hardware init: golden registers must be programmed before the GART
 * is enabled. */
static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}
761 static int gmc_v9_0_hw_fini(void *handle
)
763 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
765 amdgpu_irq_put(adev
, &adev
->mc
.vm_fault
, 0);
766 gmc_v9_0_gart_disable(adev
);
771 static int gmc_v9_0_suspend(void *handle
)
773 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
775 if (adev
->vm_manager
.enabled
) {
776 gmc_v9_0_vm_fini(adev
);
777 adev
->vm_manager
.enabled
= false;
779 gmc_v9_0_hw_fini(adev
);
784 static int gmc_v9_0_resume(void *handle
)
787 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
789 r
= gmc_v9_0_hw_init(adev
);
793 if (!adev
->vm_manager
.enabled
) {
794 r
= gmc_v9_0_vm_init(adev
);
797 "vm manager initialization failed (%d).\n", r
);
800 adev
->vm_manager
.enabled
= true;
806 static bool gmc_v9_0_is_idle(void *handle
)
808 /* MC is always ready in GMC v9.*/
static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}
static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}
824 static int gmc_v9_0_set_clockgating_state(void *handle
,
825 enum amd_clockgating_state state
)
830 static int gmc_v9_0_set_powergating_state(void *handle
,
831 enum amd_powergating_state state
)
836 const struct amd_ip_funcs gmc_v9_0_ip_funcs
= {
838 .early_init
= gmc_v9_0_early_init
,
839 .late_init
= gmc_v9_0_late_init
,
840 .sw_init
= gmc_v9_0_sw_init
,
841 .sw_fini
= gmc_v9_0_sw_fini
,
842 .hw_init
= gmc_v9_0_hw_init
,
843 .hw_fini
= gmc_v9_0_hw_fini
,
844 .suspend
= gmc_v9_0_suspend
,
845 .resume
= gmc_v9_0_resume
,
846 .is_idle
= gmc_v9_0_is_idle
,
847 .wait_for_idle
= gmc_v9_0_wait_for_idle
,
848 .soft_reset
= gmc_v9_0_soft_reset
,
849 .set_clockgating_state
= gmc_v9_0_set_clockgating_state
,
850 .set_powergating_state
= gmc_v9_0_set_powergating_state
,
853 const struct amdgpu_ip_block_version gmc_v9_0_ip_block
=
855 .type
= AMD_IP_BLOCK_TYPE_GMC
,
859 .funcs
= &gmc_v9_0_ip_funcs
,