bool emit_wait);
void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
uint64_t pd_addr);
+ void (*emit_hdp_flush)(struct amdgpu_ring *ring);
void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
struct amdgpu_fence *fence;
struct amdgpu_user_fence *user;
struct amdgpu_vm *vm;
- bool flush_hdp_writefifo;
struct amdgpu_sync sync;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
#define amdgpu_ring_emit_fence(r, addr, seq, write64bit) (r)->funcs->emit_fence((r), (addr), (seq), (write64bit))
#define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
+#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
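The new hook follows the existing optional-callback pattern: an IP block that knows how to flush HDP wires a handler into its amdgpu_ring_funcs table, and rings without one leave the pointer NULL so the scheduler can skip it. A minimal sketch, with hypothetical foo_* names standing in for a concrete IP block:

static void foo_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* emit the IP specific HDP flush packet sequence */
}

static const struct amdgpu_ring_funcs foo_ring_funcs = {
	/* ... other callbacks ... */
	.emit_hdp_flush = foo_ring_emit_hdp_flush,
};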
struct amdgpu_ib *ib = &ibs[0];
unsigned i;
int r = 0;
- bool flush_hdp = true;
if (num_ibs == 0)
return -EINVAL;
ib->gws_base, ib->gws_size,
ib->oa_base, ib->oa_size);
+ if (ring->funcs->emit_hdp_flush)
+ amdgpu_ring_emit_hdp_flush(ring);
+
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
amdgpu_ring_unlock_undo(ring);
return -EINVAL;
}
- ib->flush_hdp_writefifo = flush_hdp;
- flush_hdp = false;
amdgpu_ring_emit_ib(ring, ib);
}
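The net effect on the scheduling path: instead of tagging whichever IB happened to come first with a per-IB flag, the flush is emitted exactly once per submission, ahead of the IB walk. A condensed sketch of the resulting amdgpu_ib_schedule() sequence, with the context elided from the hunk above filled in by assumption:

	if (ib->vm)
		amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
					    ib->gds_base, ib->gds_size,
					    ib->gws_base, ib->gws_size,
					    ib->oa_base, ib->oa_size);

	/* one HDP flush for the whole frame, if the IP block supports it */
	if (ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);

	for (i = 0; i < num_ibs; ++i)
		amdgpu_ring_emit_ib(ring, &ibs[i]);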
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}
-static void cik_sdma_hdp_flush_ring_emit(struct amdgpu_ring *);
-
/**
* cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
*
u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
u32 next_rptr = ring->wptr + 5;
- if (ib->flush_hdp_writefifo)
- next_rptr += 6;
-
while ((next_rptr & 7) != 4)
next_rptr++;
amdgpu_ring_write(ring, 1); /* number of DWs to follow */
amdgpu_ring_write(ring, next_rptr);
- if (ib->flush_hdp_writefifo) {
- /* flush HDP */
- cik_sdma_hdp_flush_ring_emit(ring);
- }
-
/* IB packet must end on a 8 DW boundary */
while ((ring->wptr & 7) != 4)
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
}
/**
- * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
*
* Emit an hdp flush packet on the requested DMA ring.
*/
-static void cik_sdma_hdp_flush_ring_emit(struct amdgpu_ring *ring)
+static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
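The flush itself is a POLL_REG_MEM packet: it writes GPU_HDP_FLUSH_REQ and then polls GPU_HDP_FLUSH_DONE until the bit for this SDMA instance reads back equal. A sketch of the remainder of the body, reconstructed from the CIK SDMA packet conventions (register and mask names as in the CIK headers):

	u32 ref_and_mask;

	if (ring == &ring->adev->sdma[0].ring)
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
	else
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */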
.emit_fence = cik_sdma_ring_emit_fence,
.emit_semaphore = cik_sdma_ring_emit_semaphore,
.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
+ .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
.test_ring = cik_sdma_ring_test_ring,
.test_ib = cik_sdma_ring_test_ib,
.is_lockup = cik_sdma_ring_is_lockup,
}
/**
- * gfx_v7_0_hdp_flush_cp_ring_emit - emit an hdp flush on the cp
+ * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
*
* @ring: amdgpu ring pointer
*
* Emits an hdp flush on the cp.
*/
-static void gfx_v7_0_hdp_flush_cp_ring_emit(struct amdgpu_ring *ring)
+static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask;
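On the CP the same request/done handshake is expressed as a WAIT_REG_MEM packet, and the DONE bit to poll depends on which ME/pipe the ring runs on. A sketch of the rest of the body, assuming the CIK WAIT_REG_MEM encodings:

	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
			break;
		case 2:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
			break;
		default:
			return;
		}
	} else {
		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
				 WAIT_REG_MEM_ENGINE(1)));   /* pfp */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, 0x20); /* poll interval */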
if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
control |= INDIRECT_BUFFER_VALID;
- if (ib->flush_hdp_writefifo)
- next_rptr += 7;
-
if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
next_rptr += 2;
amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
amdgpu_ring_write(ring, next_rptr);
- if (ib->flush_hdp_writefifo)
- gfx_v7_0_hdp_flush_cp_ring_emit(ring);
-
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
+ .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
.test_ring = gfx_v7_0_ring_test_ring,
.test_ib = gfx_v7_0_ring_test_ib,
.is_lockup = gfx_v7_0_ring_is_lockup,
}
}
-static void gfx_v8_0_hdp_flush_cp_ring_emit(struct amdgpu_ring *ring)
+static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask, reg_mem_engine;
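The gfx v8 variant follows the same WAIT_REG_MEM pattern but selects the engine bit explicitly, since compute rings have to poll from the ME rather than the PFP. A sketch under the same assumptions as the v7 version above:

	if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
		/* per-pipe DONE bit, polled from the ME */
		ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
		reg_mem_engine = 0;
	} else {
		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
		reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) |
				 WAIT_REG_MEM_FUNCTION(3) |
				 reg_mem_engine));
	/* remaining dwords: REQ/DONE register offsets, ref, mask and
	 * poll interval as in the gfx_v7_0 sketch above */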
if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
control |= INDIRECT_BUFFER_VALID;
- if (ib->flush_hdp_writefifo)
- next_rptr += 7;
-
if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
next_rptr += 2;
amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
amdgpu_ring_write(ring, next_rptr);
- if (ib->flush_hdp_writefifo)
- gfx_v8_0_hdp_flush_cp_ring_emit(ring);
-
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
+ .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.is_lockup = gfx_v8_0_ring_is_lockup,
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
}
-static void sdma_v2_4_hdp_flush_ring_emit(struct amdgpu_ring *);
-
/**
* sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
*
u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
u32 next_rptr = ring->wptr + 5;
- if (ib->flush_hdp_writefifo)
- next_rptr += 6;
-
while ((next_rptr & 7) != 2)
next_rptr++;
amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
amdgpu_ring_write(ring, next_rptr);
- if (ib->flush_hdp_writefifo) {
- /* flush HDP */
- sdma_v2_4_hdp_flush_ring_emit(ring);
- }
-
/* IB packet must end on a 8 DW boundary */
while ((ring->wptr & 7) != 2)
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
*
* Emit an hdp flush packet on the requested DMA ring.
*/
-static void sdma_v2_4_hdp_flush_ring_emit(struct amdgpu_ring *ring)
+static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
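VI SDMA encodes the handshake in its own POLL_REGMEM packet: the hdp_flush bit in the header performs the REQ write and the packet then polls DONE. A sketch of the rest of the body, assuming the VI SDMA_PKT_POLL_REGMEM_* encodings:

	if (ring == &ring->adev->sdma[0].ring)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));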
.emit_fence = sdma_v2_4_ring_emit_fence,
.emit_semaphore = sdma_v2_4_ring_emit_semaphore,
.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
+ .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
.test_ring = sdma_v2_4_ring_test_ring,
.test_ib = sdma_v2_4_ring_test_ib,
.is_lockup = sdma_v2_4_ring_is_lockup,
}
}
-static void sdma_v3_0_hdp_flush_ring_emit(struct amdgpu_ring *);
-
/**
* sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
*
u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
u32 next_rptr = ring->wptr + 5;
- if (ib->flush_hdp_writefifo)
- next_rptr += 6;
-
while ((next_rptr & 7) != 2)
next_rptr++;
next_rptr += 6;
amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
amdgpu_ring_write(ring, next_rptr);
- /* flush HDP */
- if (ib->flush_hdp_writefifo) {
- sdma_v3_0_hdp_flush_ring_emit(ring);
- }
-
/* IB packet must end on a 8 DW boundary */
while ((ring->wptr & 7) != 2)
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
}
/**
- * sdma_v3_0_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
*
* Emit an hdp flush packet on the requested DMA ring.
*/
-static void sdma_v3_0_hdp_flush_ring_emit(struct amdgpu_ring *ring)
+static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
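SDMA 3.0 uses the same POLL_REGMEM sequence as the 2.4 sketch above, against its own copy of the packet headers; only the per-instance DONE bit selection is worth repeating:

	if (ring == &ring->adev->sdma[0].ring)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
	/* then the POLL_REGMEM packet exactly as in sdma_v2_4 above */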
.emit_fence = sdma_v3_0_ring_emit_fence,
.emit_semaphore = sdma_v3_0_ring_emit_semaphore,
.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
+ .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
.test_ring = sdma_v3_0_ring_test_ring,
.test_ib = sdma_v3_0_ring_test_ib,
.is_lockup = sdma_v3_0_ring_is_lockup,