git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
drm/amdgpu: implement gmc_v8_0_emit_flush_gpu_tlb
authorChristian König <christian.koenig@amd.com>
Fri, 12 Jan 2018 18:14:42 +0000 (19:14 +0100)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 19 Feb 2018 19:18:09 +0000 (14:18 -0500)
Unify tlb flushing for gmc v8.

v2: handle UVD v6 as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/vi.h

index 81afd54633180c47238a7576547f839bbfd0eb53..e4d209b5c8796c1ab2ef9fa6a8917619aaaf4459 100644 (file)
@@ -6333,28 +6333,7 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
        int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
-                                WRITE_DATA_DST_SEL(0)) |
-                                WR_CONFIRM);
-       if (vmid < 8) {
-               amdgpu_ring_write(ring,
-                                 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-       } else {
-               amdgpu_ring_write(ring,
-                                 (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-       }
-       amdgpu_ring_write(ring, 0);
-       amdgpu_ring_write(ring, pd_addr >> 12);
-
-       /* bits 0-15 are the VM contexts0-15 */
-       /* invalidate the cache */
-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                                WRITE_DATA_DST_SEL(0)));
-       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-       amdgpu_ring_write(ring, 0);
-       amdgpu_ring_write(ring, 1 << vmid);
+       amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pasid, pd_addr);
 
        /* wait for the invalidate to complete */
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -6886,7 +6865,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
        .emit_frame_size = /* maximum 215dw if count 16 IBs in */
                5 +  /* COND_EXEC */
                7 +  /* PIPELINE_SYNC */
-               19 + /* VM_FLUSH */
+               VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
                8 +  /* FENCE for VM_FLUSH */
                20 + /* GDS switch */
                4 + /* double SWITCH_BUFFER,
@@ -6933,7 +6912,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
                7 + /* gfx_v8_0_ring_emit_hdp_flush */
                5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
                7 + /* gfx_v8_0_ring_emit_pipeline_sync */
-               17 + /* gfx_v8_0_ring_emit_vm_flush */
+               VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
                7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
        .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
        .emit_ib = gfx_v8_0_ring_emit_ib_compute,
index ac73b2c60fc302905deb73bbb76363b6b4a16f42..267ff3d4872a2b0ba2ddd7c0241834584671c4d1 100644 (file)
@@ -611,6 +611,24 @@ static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
 
+static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+                                           unsigned vmid, unsigned pasid,
+                                           uint64_t pd_addr)
+{
+       uint32_t reg;
+
+       if (vmid < 8)
+               reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
+       else
+               reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
+       amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
+
+       /* bits 0-15 are the VM contexts0-15 */
+       amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
+
+       return pd_addr;
+}
+
 /**
  * gmc_v8_0_set_pte_pde - update the page tables using MMIO
  *
@@ -1640,6 +1658,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 
 static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
+       .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
        .set_pte_pde = gmc_v8_0_set_pte_pde,
        .set_prt = gmc_v8_0_set_prt,
        .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
index 0fb4b4409ef1b02802954c2f5079c57925bffec1..0aa3363718160ad8248cce7ce29263cee5735150 100644 (file)
@@ -862,20 +862,7 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vmid, unsigned pasid,
                                         uint64_t pd_addr)
 {
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-       if (vmid < 8) {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-       } else {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-       }
-       amdgpu_ring_write(ring, pd_addr >> 12);
-
-       /* flush TLB */
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-       amdgpu_ring_write(ring, 1 << vmid);
+       amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pasid, pd_addr);
 
        /* wait for flush */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
@@ -1215,7 +1202,7 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
                6 + /* sdma_v2_4_ring_emit_hdp_flush */
                3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
                6 + /* sdma_v2_4_ring_emit_pipeline_sync */
-               12 + /* sdma_v2_4_ring_emit_vm_flush */
+               VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v2_4_ring_emit_vm_flush */
                10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
        .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
        .emit_ib = sdma_v2_4_ring_emit_ib,
index 935c3a8b66299d5b199d83cb1aea10abe94387ca..e417546e204836f070c0a88a1bd579756affc52b 100644 (file)
@@ -1128,20 +1128,7 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vmid, unsigned pasid,
                                         uint64_t pd_addr)
 {
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-       if (vmid < 8) {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-       } else {
-               amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-       }
-       amdgpu_ring_write(ring, pd_addr >> 12);
-
-       /* flush TLB */
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-                         SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-       amdgpu_ring_write(ring, 1 << vmid);
+       amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pasid, pd_addr);
 
        /* wait for flush */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
index 0446fe2c67e14d3b2f774d31b569688b4088a14b..5f499e663e2ac1609a23b1892dd7254bdd001d52 100644 (file)
@@ -1087,26 +1087,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                        unsigned vmid, unsigned pasid,
                                        uint64_t pd_addr)
 {
-       uint32_t reg;
-
-       if (vmid < 8)
-               reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
-       else
-               reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
-
-       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
-       amdgpu_ring_write(ring, reg << 2);
-       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
-       amdgpu_ring_write(ring, pd_addr >> 12);
-       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
-       amdgpu_ring_write(ring, 0x8);
-
-       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
-       amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
-       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
-       amdgpu_ring_write(ring, 1 << vmid);
-       amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
-       amdgpu_ring_write(ring, 0x8);
+       amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pasid, pd_addr);
 
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
index 575d7aed5d32f2476f9056923b68bd768e66885e..6cc2bee4abf77b3775ad3ed146812093eda9c9ec 100644 (file)
@@ -24,6 +24,8 @@
 #ifndef __VI_H__
 #define __VI_H__
 
+#define VI_FLUSH_GPU_TLB_NUM_WREG      2
+
 void vi_srbm_select(struct amdgpu_device *adev,
                    u32 me, u32 pipe, u32 queue, u32 vmid);
 int vi_set_ip_blocks(struct amdgpu_device *adev);