/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"

#include "vega10/soc15ip.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#define mmDF_CS_AON0_DramBaseAddress0						0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX					0
//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT				0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT			0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT			0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT			0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT			0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK				0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK			0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK				0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK				0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK				0xFFFFF000L
/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8
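
/* Golden register settings, applied via amdgpu_program_register_sequence()
 * in gart_enable below; each entry is assumed to follow the usual
 * { register offset, AND mask, OR value } triple convention.
 */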
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
	uint32_t status = 0;
	u64 addr;
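
	/* Reassemble the 48-bit faulting address from the IV entry:
	 * src_data[0] carries page-number bits giving address bits 43:12,
	 * and the low nibble of src_data[1] carries bits 47:44.
	 */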
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
			entry->vm_id_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vm_id,
			entry->pas_id);
		dev_err(adev->dev, " at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
{
	u32 req = 0;

	/* invalidate using legacy mode on vm_id */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}
/*
 * GART
 * VMID 0 covers the physical GPU address space as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache */
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	spin_lock(&adev->mc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
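
		/* The hub is expected to set bit `vmid` in the matching ACK
		 * register once the invalidation completes; poll for it below.
		 */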
		/* Busy wait for ACK.*/
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay.*/
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}
/**
 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
				void *cpu_pt_addr,
				uint32_t gpu_page_idx,
				uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 47:12 4k physical page base address
	 * (the low bits carry the access flags)
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 47:6 physical base address of PD or PTE
	 * (the low bits carry the access flags)
	 */

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
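	/* A single 64-bit store publishes the complete PTE in one access. */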
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
						uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
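
	/* Translate the requested memory type into the hardware MTYPE bits;
	 * anything unrecognized falls back to MTYPE_NC.
	 */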
	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}
static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
{
	addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
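	/* A PDE must be 64-byte aligned and fit in the 48-bit MC address
	 * space, i.e. bits 5:0 and 63:48 must be zero.
	 */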
	BUG_ON(addr & 0xFFFF00000000003FULL);
	return addr;
}
static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};
static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}
static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	return 0;
}
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
	unsigned i;
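
	/* Hand each ring its own invalidation engine, starting at engine 3
	 * on both hubs (engines below 3 appear to be reserved for other
	 * users; engine 17 is claimed for GART flushes, hence the BUG_ON
	 * below).
	 */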
	for(i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}
	/* Engine 17 is used for GART flushes */
	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 17);

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_mc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_vram_location(adev, &adev->mc, base);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
	else
		adev->vm_manager.vram_base_offset = 0;
}
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* hbm memory channel size */
	chansize = 128;

	tmp = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_CS_AON0_DramBaseAddress0));
	tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
	tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
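	/* Decode the interleave setting into the number of active DRAM
	 * channels (numchan); the channel count times the per-channel width
	 * gives the effective VRAM bus width.
	 */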
	adev->mc.vram_width = numchan * chansize;
	/* Could the aperture size ever report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* memsize is reported in MB, convert to bytes */
	adev->mc.mc_vram_size =
		((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
		 nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = adev->mc.mc_vram_size;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;
	/* unless the user has overridden it, set the gart
	 * size to the default GTT size or vram, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
					adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v9_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
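	/* 8 bytes per page: each GART entry is a single 64-bit PTE */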
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}
static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->mc.invalidate_lock);

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
		amdgpu_vm_adjust_size(adev, 64);
	} else {
		/* XXX Don't know how to get VRAM type yet. */
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		adev->vm_manager.vm_size = 1U << 18;
		adev->vm_manager.block_size = 9;
		DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
				adev->vm_manager.vm_size,
				adev->vm_manager.block_size);
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
				&adev->mc.vm_fault);
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
				&adev->mc.vm_fault);
	if (r)
		return r;

	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
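	/* vm_size is in GB and one GB holds 1 << 18 4KB pages, so this
	 * yields the page-frame count of the full address space.
	 */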

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;
	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;
	/*
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	/* TODO: fix num_level for APU when updating vm size and block size */
	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.num_level = 1;
	else
		adev->vm_manager.num_level = 3;
	amdgpu_vm_manager_init(adev);

	return 0;
}
/**
 * gmc_v9_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v9_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	default:
		break;
	}
}
/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_program_register_sequence(adev,
					golden_settings_vega10_hdp,
					(const u32)ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* After HDP is initialized, flush HDP.*/
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	tmp = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MISC_CNTL));
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MISC_CNTL), tmp);
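	/* The bit set above makes HDP flush operations also invalidate the
	 * HDP read cache.
	 */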

	tmp = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_HOST_PATH_CNTL));
	WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_HOST_PATH_CNTL), tmp);
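	/* The value is written back unchanged; the read-write cycle itself
	 * presumably serves to latch the host path settings.
	 */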

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);

	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_hw_fini(adev);

	return 0;
}
static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}
static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}
static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}
static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}
static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}
static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
};
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};