drm/amdgpu: switch to new amdgpu_nbio structure
author	Hawking Zhang <Hawking.Zhang@amd.com>
	Fri, 23 Aug 2019 11:39:18 +0000 (19:39 +0800)
committer	Alex Deucher <alexander.deucher@amd.com>
	Fri, 13 Sep 2019 22:11:03 +0000 (17:11 -0500)
No functional change, just switch to the new structures.

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
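
For orientation, here is a minimal sketch of the new grouping as this patch uses it. The actual struct amdgpu_nbio definition lives in the newly added amdgpu_nbio.h, which is not part of this commitdiff, so the layout below is inferred from the accesses in the patch and may omit fields; example_flush_hdp() is a hypothetical helper, not code from the series.

	/* Inferred layout: the per-ASIC callback table and the HDP flush
	 * register masks, which previously hung off amdgpu_device directly
	 * (adev->nbio_funcs, nbio_funcs->hdp_flush_reg), are now grouped in
	 * a single nbio instance embedded in amdgpu_device (adev->nbio).
	 */
	struct amdgpu_nbio {
		const struct nbio_hdp_flush_reg *hdp_flush_reg;
		const struct amdgpu_nbio_funcs *funcs;
	};

	/* Hypothetical helper illustrating the mechanical change in callers. */
	static void example_flush_hdp(struct amdgpu_device *adev)
	{
		/* was: adev->nbio_funcs->hdp_flush(adev, NULL); */
		adev->nbio.funcs->hdp_flush(adev, NULL);
	}

ASIC setup now wires both pointers explicitly (see the soc15.c and nv.c hunks below): each chip assigns adev->nbio.funcs and adev->nbio.hdp_flush_reg, and the per-version hdp_flush_reg tables are exported from the nbio_v*.c files instead of being referenced through the funcs table.
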
27 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/df_v3_6.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index bd37df5dd6d0489e0ec6edbdafa4333b9e324d2d..ca963ed6049c22d5a574e6d3be42e22ab95b2911 100644
@@ -73,6 +73,7 @@
 #include "amdgpu_gmc.h"
 #include "amdgpu_gfx.h"
 #include "amdgpu_sdma.h"
+#include "amdgpu_nbio.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
 #include "amdgpu_csa.h"
@@ -644,69 +645,11 @@ typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
-
-/*
- * amdgpu nbio functions
- *
- */
-struct nbio_hdp_flush_reg {
-       u32 ref_and_mask_cp0;
-       u32 ref_and_mask_cp1;
-       u32 ref_and_mask_cp2;
-       u32 ref_and_mask_cp3;
-       u32 ref_and_mask_cp4;
-       u32 ref_and_mask_cp5;
-       u32 ref_and_mask_cp6;
-       u32 ref_and_mask_cp7;
-       u32 ref_and_mask_cp8;
-       u32 ref_and_mask_cp9;
-       u32 ref_and_mask_sdma0;
-       u32 ref_and_mask_sdma1;
-       u32 ref_and_mask_sdma2;
-       u32 ref_and_mask_sdma3;
-       u32 ref_and_mask_sdma4;
-       u32 ref_and_mask_sdma5;
-       u32 ref_and_mask_sdma6;
-       u32 ref_and_mask_sdma7;
-};
-
 struct amdgpu_mmio_remap {
        u32 reg_offset;
        resource_size_t bus_addr;
 };
 
-struct amdgpu_nbio_funcs {
-       const struct nbio_hdp_flush_reg *hdp_flush_reg;
-       u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
-       u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
-       u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
-       u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
-       u32 (*get_rev_id)(struct amdgpu_device *adev);
-       void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
-       void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
-       u32 (*get_memsize)(struct amdgpu_device *adev);
-       void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
-                       bool use_doorbell, int doorbell_index, int doorbell_size);
-       void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
-                                  int doorbell_index, int instance);
-       void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
-                                        bool enable);
-       void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
-                                                 bool enable);
-       void (*ih_doorbell_range)(struct amdgpu_device *adev,
-                                 bool use_doorbell, int doorbell_index);
-       void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
-                                                bool enable);
-       void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
-                                               bool enable);
-       void (*get_clockgating_state)(struct amdgpu_device *adev,
-                                     u32 *flags);
-       void (*ih_control)(struct amdgpu_device *adev);
-       void (*init_registers)(struct amdgpu_device *adev);
-       void (*detect_hw_virt)(struct amdgpu_device *adev);
-       void (*remap_hdp_registers)(struct amdgpu_device *adev);
-};
-
 struct amdgpu_df_funcs {
        void (*sw_init)(struct amdgpu_device *adev);
        void (*enable_broadcast_mode)(struct amdgpu_device *adev,
@@ -921,6 +864,9 @@ struct amdgpu_device {
        u32                             cg_flags;
        u32                             pg_flags;
 
+       /* nbio */
+       struct amdgpu_nbio              nbio;
+
        /* gfx */
        struct amdgpu_gfx               gfx;
 
@@ -974,7 +920,6 @@ struct amdgpu_device {
        /* soc15 register offset based on ip, instance and  segment */
        uint32_t                *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
-       const struct amdgpu_nbio_funcs  *nbio_funcs;
        const struct amdgpu_df_funcs    *df_funcs;
        const struct amdgpu_mmhub_funcs *mmhub_funcs;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 5850c8e34caacc57e2e44581c5e2892022ab7a3d..deee4f40423d74b5476b987dfab2023b86673f0a 100644
@@ -99,8 +99,8 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
        unsigned long flags, address, data;
        uint32_t ficadl_val, ficadh_val;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -122,8 +122,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
 {
        unsigned long flags, address, data;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -150,8 +150,8 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
 {
        unsigned long flags, address, data;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, lo_addr);
@@ -172,8 +172,8 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
 {
        unsigned long flags, address, data;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, lo_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index db28823891ac0083ac7545b84e6b4ae680823430..efd92fe100a96e6b5c0c2198f992aa1d177d2edd 100644
@@ -2421,7 +2421,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
        }
 
        if (amdgpu_emu_mode == 1)
-               adev->nbio_funcs->hdp_flush(adev, NULL);
+               adev->nbio.funcs->hdp_flush(adev, NULL);
 
        tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2491,7 +2491,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
        }
 
        if (amdgpu_emu_mode == 1)
-               adev->nbio_funcs->hdp_flush(adev, NULL);
+               adev->nbio.funcs->hdp_flush(adev, NULL);
 
        tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -2560,7 +2560,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
        }
 
        if (amdgpu_emu_mode == 1)
-               adev->nbio_funcs->hdp_flush(adev, NULL);
+               adev->nbio.funcs->hdp_flush(adev, NULL);
 
        tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2881,7 +2881,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
        }
 
        if (amdgpu_emu_mode == 1)
-               adev->nbio_funcs->hdp_flush(adev, NULL);
+               adev->nbio.funcs->hdp_flush(adev, NULL);
 
        tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -4335,7 +4335,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask, reg_mem_engine;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
                switch (ring->me) {
@@ -4355,8 +4355,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
        }
 
        gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
-                              adev->nbio_funcs->get_hdp_flush_req_offset(adev),
-                              adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+                              adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+                              adev->nbio.funcs->get_hdp_flush_done_offset(adev),
                               ref_and_mask, ref_and_mask, 0x20);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 83d45f98a46189da490ae2224988e7b9a4c7009d..547443cf9498fccc7fe2351d8577830edba95384 100644
@@ -4972,7 +4972,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask, reg_mem_engine;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
                switch (ring->me) {
@@ -4992,8 +4992,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
        }
 
        gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
-                             adev->nbio_funcs->get_hdp_flush_req_offset(adev),
-                             adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+                             adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+                             adev->nbio.funcs->get_hdp_flush_done_offset(adev),
                              ref_and_mask, ref_and_mask, 0x20);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 241a4e57cf4a993ea1756523c3b69f208f30a9d7..a639b7241200182108784fd89065fb8b50078f91 100644
@@ -278,7 +278,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
        int r;
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        mutex_lock(&adev->mman.gtt_window_lock);
 
@@ -557,7 +557,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
 
        /* size in MB on si */
        adev->gmc.mc_vram_size =
-               adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+               adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
        adev->gmc.visible_vram_size = adev->gmc.aper_size;
 
@@ -794,7 +794,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
        WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 
        /* Flush HDP after it is initialized */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
                false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index f91337030dc07857987bfc25ea6cabe8185585e3..0a26107b423d21b426e6d562d01bec92a48b5fba 100644
@@ -996,7 +996,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 
        /* size in MB on si */
        adev->gmc.mc_vram_size =
-               adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+               adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 
        if (!(adev->flags & AMD_IS_APU)) {
@@ -1361,7 +1361,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
        WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
 
        /* After HDP is initialized, flush HDP.*/
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
                value = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 9fe08408db588fa6154567d9eba89760f78a7ce8..9af73567e716a62705c4f44b88f2b6a6d4f4f352 100644
@@ -117,7 +117,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
        /* disable irqs */
        navi10_ih_disable_interrupts(adev);
 
-       adev->nbio_funcs->ih_control(adev);
+       adev->nbio.funcs->ih_control(adev);
 
        /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
@@ -162,7 +162,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
        }
        WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
 
-       adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell,
+       adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
                                            ih->doorbell_index);
 
        tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index c05d78d4efc667ce6443f9df6ef86332ed56acef..e7e36fb6113d02d58ad9bdfd720ff1db5848ee19 100644
@@ -311,7 +311,6 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
 }
 
 const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
-       .hdp_flush_reg = &nbio_v2_3_hdp_flush_reg,
        .get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
        .get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
index 5ae52085f6b7fde953dc8f36242f10ae5eec15c2..a43b60acf7f63fd396f3dc3987871da65f045d2a 100644
@@ -26,6 +26,7 @@
 
 #include "soc15_common.h"
 
+extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 6590143c3f7516ab1f532f5ad0a111b8005a8073..635d9e1fc0a364db991317bec42e6d6edccc29b1 100644
@@ -226,7 +226,7 @@ static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
        return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
 }
 
-static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
        .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
        .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
        .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -277,7 +277,6 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
 }
 
 const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
-       .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
        .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
        .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index 0743a6f016f37cfb793166c54c78050c8d199d67..6dc743b732181dc1d8b05317526c611f0b3e5f89 100644
@@ -26,6 +26,7 @@
 
 #include "soc15_common.h"
 
+extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 74eecb768a82002c140196d80281016319ba3909..d6cbf26074bca475d915d1ac9f1d6b6c13665130 100644
@@ -292,7 +292,6 @@ static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
 }
 
 const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
-       .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
        .get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
        .get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
index 508d549c50291fc89a40539b2a33642275b74a0d..e7aefb252550b7efe6c8aad276699ce1fc2eebaf 100644
@@ -26,6 +26,7 @@
 
 #include "soc15_common.h"
 
+extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 910fffced43bb9ecabf4868b73c0c6b686b83f08..c416ab8ab1c36891beffc39b9a5b62b760da5ea3 100644
@@ -266,7 +266,7 @@ static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
        return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
 }
 
-static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
        .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
        .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
        .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -316,7 +316,6 @@ static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
 }
 
 const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
-       .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg,
        .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
        .get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
index c442865bac4f219d992cab9f62dae9d8ae4c0144..b1ac828727526367bfa3fc2313c1dfb63ec15a49 100644
@@ -26,6 +26,7 @@
 
 #include "soc15_common.h"
 
+extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 85393a99a848be702ba1d5f0faa44d0a3b139784..285f1a1f1db54ef9ee4d8e2c936d6fd61823998b 100644
@@ -46,6 +46,7 @@
 #include "gmc_v10_0.h"
 #include "gfxhub_v2_0.h"
 #include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
 #include "nv.h"
 #include "navi10_ih.h"
 #include "gfx_v10_0.h"
@@ -63,8 +64,8 @@ static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 {
        unsigned long flags, address, data;
        u32 r;
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
@@ -78,8 +79,8 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
        unsigned long flags, address, data;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
@@ -119,7 +120,7 @@ static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 
 static u32 nv_get_config_memsize(struct amdgpu_device *adev)
 {
-       return adev->nbio_funcs->get_memsize(adev);
+       return adev->nbio.funcs->get_memsize(adev);
 }
 
 static u32 nv_get_xclk(struct amdgpu_device *adev)
@@ -279,7 +280,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
 
        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
-               u32 memsize = adev->nbio_funcs->get_memsize(adev);
+               u32 memsize = adev->nbio.funcs->get_memsize(adev);
 
                if (memsize != 0xffffffff)
                        break;
@@ -366,8 +367,8 @@ static void nv_program_aspm(struct amdgpu_device *adev)
 static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
                                        bool enable)
 {
-       adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
-       adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+       adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+       adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
 }
 
 static const struct amdgpu_ip_block_version nv_common_ip_block =
@@ -421,9 +422,10 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       adev->nbio_funcs = &nbio_v2_3_funcs;
+       adev->nbio.funcs = &nbio_v2_3_funcs;
+       adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
 
-       adev->nbio_funcs->detect_hw_virt(adev);
+       adev->nbio.funcs->detect_hw_virt(adev);
 
        switch (adev->asic_type) {
        case CHIP_NAVI10:
@@ -480,12 +482,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 
 static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
 {
-       return adev->nbio_funcs->get_rev_id(adev);
+       return adev->nbio.funcs->get_rev_id(adev);
 }
 
 static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-       adev->nbio_funcs->hdp_flush(adev, ring);
+       adev->nbio.funcs->hdp_flush(adev, ring);
 }
 
 static void nv_invalidate_hdp(struct amdgpu_device *adev,
@@ -692,7 +694,7 @@ static int nv_common_hw_init(void *handle)
        /* enable aspm */
        nv_program_aspm(adev);
        /* setup nbio registers */
-       adev->nbio_funcs->init_registers(adev);
+       adev->nbio.funcs->init_registers(adev);
        /* enable the doorbell aperture */
        nv_enable_doorbell_aperture(adev, true);
 
@@ -854,9 +856,9 @@ static int nv_common_set_clockgating_state(void *handle,
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
-               adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+               adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
-               adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+               adev->nbio.funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                nv_update_hdp_mem_power_gating(adev,
                                   state == AMD_CG_STATE_GATE ? true : false);
@@ -884,7 +886,7 @@ static void nv_common_get_clockgating_state(void *handle, u32 *flags)
        if (amdgpu_sriov_vf(adev))
                *flags = 0;
 
-       adev->nbio_funcs->get_clockgating_state(adev, flags);
+       adev->nbio.funcs->get_clockgating_state(adev, flags);
 
        /* AMD_CG_SUPPORT_HDP_MGCG */
        tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index ff18b3a57892c648ea54d56fb25809653af972da..f4d353bf57c1e73fdb67741ce52e705baba12fff 100644
@@ -746,13 +746,13 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask = 0;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
        ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
 
        sdma_v4_0_wait_reg_mem(ring, 0, 1,
-                              adev->nbio_funcs->get_hdp_flush_done_offset(adev),
-                              adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+                              adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+                              adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                               ref_and_mask, ref_and_mask, 10);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index fa2f70ce2e2b49bfa5a4c1d116b10a78ddeedb79..6cd5d4ad08173d3d9d8c040123235cc34d00974c 100644
@@ -406,7 +406,7 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask = 0;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
        if (ring->me == 0)
                ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
@@ -416,8 +416,8 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
-       amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
-       amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
+       amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+       amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
        amdgpu_ring_write(ring, ref_and_mask); /* reference */
        amdgpu_ring_write(ring, ref_and_mask); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
@@ -683,7 +683,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
                WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
 
-               adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+               adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
                                                      ring->doorbell_index, 20);
 
                if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f70658a536a933cc6bc7f64b8504309dbb617ca1..d61d79435fa0c9a7f10b53c22c278770b27029c5 100644
@@ -58,6 +58,9 @@
 #include "mmhub_v1_0.h"
 #include "df_v1_7.h"
 #include "df_v3_6.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
 #include "vega10_ih.h"
 #include "sdma_v4_0.h"
 #include "uvd_v7_0.h"
@@ -91,8 +94,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 {
        unsigned long flags, address, data;
        u32 r;
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
@@ -106,8 +109,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
        unsigned long flags, address, data;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
@@ -121,8 +124,8 @@ static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
 {
        unsigned long flags, address, data;
        u64 r;
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        /* read low 32 bit */
@@ -142,8 +145,8 @@ static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
 {
        unsigned long flags, address, data;
 
-       address = adev->nbio_funcs->get_pcie_index_offset(adev);
-       data = adev->nbio_funcs->get_pcie_data_offset(adev);
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        /* write low 32 bit */
@@ -262,7 +265,7 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 
 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
 {
-       return adev->nbio_funcs->get_memsize(adev);
+       return adev->nbio.funcs->get_memsize(adev);
 }
 
 static u32 soc15_get_xclk(struct amdgpu_device *adev)
@@ -461,7 +464,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
 
        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
-               u32 memsize = adev->nbio_funcs->get_memsize(adev);
+               u32 memsize = adev->nbio.funcs->get_memsize(adev);
 
                if (memsize != 0xffffffff)
                        break;
@@ -624,8 +627,8 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
                                           bool enable)
 {
-       adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
-       adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+       adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+       adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
 }
 
 static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -639,7 +642,7 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block =
 
 static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
 {
-       return adev->nbio_funcs->get_rev_id(adev);
+       return adev->nbio.funcs->get_rev_id(adev);
 }
 
 int soc15_set_ip_blocks(struct amdgpu_device *adev)
@@ -665,13 +668,17 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
        if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
                adev->gmc.xgmi.supported = true;
 
-       if (adev->flags & AMD_IS_APU)
-               adev->nbio_funcs = &nbio_v7_0_funcs;
-       else if (adev->asic_type == CHIP_VEGA20 ||
-               adev->asic_type == CHIP_ARCTURUS)
-               adev->nbio_funcs = &nbio_v7_4_funcs;
-       else
-               adev->nbio_funcs = &nbio_v6_1_funcs;
+       if (adev->flags & AMD_IS_APU) {
+               adev->nbio.funcs = &nbio_v7_0_funcs;
+               adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+       } else if (adev->asic_type == CHIP_VEGA20 ||
+                  adev->asic_type == CHIP_ARCTURUS) {
+               adev->nbio.funcs = &nbio_v7_4_funcs;
+               adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+       } else {
+               adev->nbio.funcs = &nbio_v6_1_funcs;
+               adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+       }
 
        if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
                adev->df_funcs = &df_v3_6_funcs;
@@ -679,7 +686,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
                adev->df_funcs = &df_v1_7_funcs;
 
        adev->rev_id = soc15_get_rev_id(adev);
-       adev->nbio_funcs->detect_hw_virt(adev);
+       adev->nbio.funcs->detect_hw_virt(adev);
 
        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_ai_virt_ops;
@@ -785,7 +792,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 
 static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-       adev->nbio_funcs->hdp_flush(adev, ring);
+       adev->nbio.funcs->hdp_flush(adev, ring);
 }
 
 static void soc15_invalidate_hdp(struct amdgpu_device *adev,
@@ -1241,12 +1248,12 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev)
        if (!amdgpu_sriov_vf(adev)) {
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        ring = &adev->sdma.instance[i].ring;
-                       adev->nbio_funcs->sdma_doorbell_range(adev, i,
+                       adev->nbio.funcs->sdma_doorbell_range(adev, i,
                                ring->use_doorbell, ring->doorbell_index,
                                adev->doorbell_index.sdma_doorbell_range);
                }
 
-               adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+               adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
                                                adev->irq.ih.doorbell_index);
        }
 }
@@ -1260,13 +1267,13 @@ static int soc15_common_hw_init(void *handle)
        /* enable aspm */
        soc15_program_aspm(adev);
        /* setup nbio registers */
-       adev->nbio_funcs->init_registers(adev);
+       adev->nbio.funcs->init_registers(adev);
        /* remap HDP registers to a hole in mmio space,
         * for the purpose of expose those registers
         * to process space
         */
-       if (adev->nbio_funcs->remap_hdp_registers)
-               adev->nbio_funcs->remap_hdp_registers(adev);
+       if (adev->nbio.funcs->remap_hdp_registers)
+               adev->nbio.funcs->remap_hdp_registers(adev);
 
        /* enable the doorbell aperture */
        soc15_enable_doorbell_aperture(adev, true);
@@ -1429,9 +1436,9 @@ static int soc15_common_set_clockgating_state(void *handle,
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
-               adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+               adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
-               adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+               adev->nbio.funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_hdp_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
@@ -1446,9 +1453,9 @@ static int soc15_common_set_clockgating_state(void *handle,
                break;
        case CHIP_RAVEN:
        case CHIP_RENOIR:
-               adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+               adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
-               adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+               adev->nbio.funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_hdp_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
@@ -1477,7 +1484,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
        if (amdgpu_sriov_vf(adev))
                *flags = 0;
 
-       adev->nbio_funcs->get_clockgating_state(adev, flags);
+       adev->nbio.funcs->get_clockgating_state(adev, flags);
 
        /* AMD_CG_SUPPORT_HDP_LS */
        data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 36ad0c0e8efbcb714fd0acc3ed3e261bbf0ab51b..4628fd10a9ec4dae8c9715ee536bdcf140df8eeb 100644
@@ -244,7 +244,7 @@ static int vcn_v2_0_hw_init(void *handle)
        struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        int i, r;
 
-       adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+       adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
                                             ring->doorbell_index, 0);
 
        ring->sched.ready = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 395c2259f979bb3c79de6768d00ca1ac52a2edd8..2d7109d443c42d8adce74a15020a99a104ea4ef3 100644
@@ -255,7 +255,7 @@ static int vcn_v2_5_hw_init(void *handle)
                        continue;
                ring = &adev->vcn.inst[j].ring_dec;
 
-               adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+               adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
                                                     ring->doorbell_index, j);
 
                r = amdgpu_ring_test_ring(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 9eae3536ddad78ffb80931065b71d82446b60ee0..14e0b0438d095d36320b86a35c678d4003f76001 100644
@@ -226,7 +226,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
        /* disable irqs */
        vega10_ih_disable_interrupts(adev);
 
-       adev->nbio_funcs->ih_control(adev);
+       adev->nbio.funcs->ih_control(adev);
 
        ih = &adev->irq.ih;
        /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 22f3c60d380f3b76bf224239f75704d94cacbf62..2a609156213589344bf89e9c51ba58c2b67f0031 100644
@@ -460,7 +460,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
                return ret;
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        if (!drv2smu)
                memcpy(table_data, table->cpu_addr, table->size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 3f12cf341511e72f09c22e5d9a1c63aa425cf986..aa0ee2b46135421477deb36d6f65a810b33ebc37 100644
@@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                        priv->smu_tables.entry[table_id].table_id);
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
                        priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 0dbdde69f2d90ce29fb7f772f712c02b6b52b19a..0f3836fd9666f4d2745bbf0d29b0cab36a686245 100644
@@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                        priv->smu_tables.entry[table_id].table_id);
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        memcpy(table, priv->smu_tables.entry[table_id].table,
                        priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index f9589806bf8340acf464d48ff0a1f38e174ad896..90c782c132d255a96426cf303f8eb4c09eef3f0b 100644
@@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                        return -EINVAL);
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        memcpy(table, priv->smu_tables.entry[table_id].table,
                        priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index b9089c6bea850463fc3cb04901f80dc27e52916b..f604612f411f3a1f2c2e52eebc4ef3698a0a55f7 100644
@@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                        return ret);
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        memcpy(table, priv->smu_tables.entry[table_id].table,
                        priv->smu_tables.entry[table_id].size);
@@ -290,7 +290,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
                        return ret);
 
        /* flush hdp cache */
-       adev->nbio_funcs->hdp_flush(adev, NULL);
+       adev->nbio.funcs->hdp_flush(adev, NULL);
 
        memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
                        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);