/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"

#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "gfxhub_v2_1.h"
#include "mmhub_v2_0.h"
#include "mmhub_v2_3.h"
#include "athub_v2_0.h"
#include "athub_v2_1.h"

static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
	/* TODO add golden setting for hdp */
};

static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

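/*
 * gmc_v10_0_vm_fault_interrupt_state - enable or disable VM fault reporting
 * by setting the fault masks on both the MM and GFX hubs.
 */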
static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	struct amdgpu_task_info task_info;
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
			return 1;
	}

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
		    (adev->asic_type < CHIP_SIENNA_CICHLID))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (!printk_ratelimit())
		return 0;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
		"for process %s pid %d thread %s pid %d)\n",
		entry->vmid_src ? "mmhub" : "gfxhub",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (!amdgpu_sriov_vf(adev))
		hub->vmhub_funcs->print_l2_protection_fault_status(adev,
								   status);

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
	.set = gmc_v10_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
	}
}

/**
 * gmc_v10_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

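/*
 * gmc_v10_0_flush_vm_hub - flush the TLB of one hub through MMIO
 *
 * Writes the invalidation request to dedicated engine 17 and polls the ACK
 * bit for the given VMID, optionally bracketed by the invalidation
 * semaphore to avoid losing the ACK across a power-gating cycle.
 */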
static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
		   GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to work around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
						hub->eng_distance * eng, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
			  hub->eng_distance * eng,
			  inv_req, hub_ip);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB_0) &&
	    (adev->asic_type < CHIP_SIENNA_CICHLID))
		RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
				  hub->eng_distance * eng, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
					hub->eng_distance * eng, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
				  hub->eng_distance * eng, 0, hub_ip);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub);
}

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: vmhub type
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence;
	struct amdgpu_job *job;

	int r;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* For SRIOV run time, driver shouldn't access the register through MMIO
	 * Directly use kiq to do the vm invalidation instead
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_sem)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);

		up_read(&adev->reset_sem);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);

	if (vmhub == AMDGPU_MMHUB_0) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	BUG_ON(vmhub != AMDGPU_GFXHUB_0);

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready ||
	    amdgpu_in_reset(adev) ||
	    ring->sched.ready == false) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	/* The SDMA on Navi has a bug which can theoretically result in memory
	 * corruption if an invalidation happens at the same time as a VA
	 * translation. Avoid this by doing the invalidation from the SDMA
	 * itself.
	 */
	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
				     &job);
	if (r)
		goto error_alloc;

	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
	job->vm_needs_flush = true;
	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_submit;

	mutex_unlock(&adev->mman.gtt_window_lock);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return;

error_submit:
	amdgpu_job_free(job);

error_alloc:
	mutex_unlock(&adev->mman.gtt_window_lock);
	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}

/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
								&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v10_0_flush_gpu_tlb(adev, vmid,
								i, flush_type);
			} else {
				gmc_v10_0_flush_gpu_tlb(adev, vmid,
							AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

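/*
 * gmc_v10_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * Same request/ack sequence as the MMIO path, but emitted as ring commands
 * together with the page directory base update for the given VMID.
 */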
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to work around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

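/*
 * gmc_v10_0_emit_pasid_mapping - update the IH VMID-to-PASID LUT entry for
 * the hub this ring belongs to.
 */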
static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on NAVI 10:
 * ...
 * 58 reserved and for sienna_cichlid is used for MALL noalloc
 * ...
 * 47:12 4k physical page base address
 * ...
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * ...
 * 47:6 physical base address of PD or PTE
 * ...
 */

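/*
 * gmc_v10_0_map_mtype - convert the UAPI memory type flag into the MTYPE
 * field of a NV10 PTE; unhandled values fall back to MTYPE_NC.
 */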
static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}
}

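/*
 * gmc_v10_0_get_vbios_fb_size - size of the framebuffer reserved by the
 * VBIOS, computed from the active viewport dimensions unless VGA mode is
 * enabled.
 */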
static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
		adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_YELLOW_CARP:
		adev->mmhub.funcs = &mmhub_v2_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v2_0_funcs;
		break;
	}
}

static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
		break;
	}
}

static int gmc_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_mmhub_funcs(adev);
	gmc_v10_0_set_gfxhub_funcs(adev);
	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->gfxhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_SIENNA_CICHLID:
		case CHIP_NAVY_FLOUNDER:
		case CHIP_DIMGREY_CAVEFISH:
		case CHIP_BEIGE_GOBY:
		case CHIP_YELLOW_CARP:
		case CHIP_CYAN_SKILLFISH:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

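/*
 * gmc_v10_0_sw_init - software init: picks VRAM parameters, registers the
 * fault/ECC interrupt sources, sizes the GART and brings up the VM manager.
 */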
static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64;
	} else if (amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
				&vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;

		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
	case CHIP_CYAN_SKILLFISH:
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	if (adev->gmc.xgmi.supported) {
		r = adev->gfxhub.funcs->get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);
	amdgpu_gmc_get_reserved_allocation(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
	case CHIP_CYAN_SKILLFISH:
		break;
	default:
		break;
	}
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
		goto skip_pin_bo;

	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

skip_pin_bo:
	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->init_registers(adev);

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	adev->gart.ready = true;

	return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	/*
	 * harvestable groups in gc_utcl2 need to be programmed before any GFX block
	 * register setup within GMC, or else system hang when harvesting SA.
	 */
	if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
		adev->gfxhub.funcs->utcl2_harvest(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

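/*
 * Clock gating is split between the MMHUB and the ATHUB: ATHUB 2.1 handles
 * Sienna Cichlid through Yellow Carp, ATHUB 2.0 everything else.
 */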
static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
	    adev->asic_type <= CHIP_YELLOW_CARP)
		return athub_v2_1_set_clockgating(adev, state);

	return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
	    adev->asic_type <= CHIP_YELLOW_CARP)
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};