/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "cikd.h"

/* sdma */
#define CIK_SDMA_UCODE_SIZE 1050
#define CIK_SDMA_UCODE_VERSION 64

u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP.  sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
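
/*
 * For reference, every sDMA packet in this file starts with a 32-bit
 * header built by the SDMA_PACKET() macro from cikd.h: the opcode lives
 * in bits 7:0, the sub-opcode in bits 15:8, and packet-specific extra
 * bits in 31:16.  The helper below is a hypothetical, illustrative-only
 * sketch (it is not part of the driver) that just mirrors that macro:
 */
static inline u32 cik_sdma_pkt_header_sketch(u32 op, u32 sub_op, u32 extra)
{
	/* same field layout as SDMA_PACKET(op, sub_op, extra) */
	return ((extra & 0xffff) << 16) | ((sub_op & 0xff) << 8) | (op & 0xff);
}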

/**
 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
			      struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 5;
		while ((next_rptr & 7) != 4)
			next_rptr++;
		next_rptr += 4;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
		radeon_ring_write(ring, 1); /* number of DWs to follow */
		radeon_ring_write(ring, next_rptr);
	}

	/* IB packet must end on a 8 DW boundary */
	while ((ring->wptr & 7) != 4)
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
	radeon_ring_write(ring, ib->length_dw);
}
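
/*
 * Illustrative only: the INDIRECT_BUFFER packet emitted above is 4 DWs,
 * so padding with NOPs until (wptr & 7) == 4 makes the packet end exactly
 * on an 8 DW boundary.  A hypothetical helper (not part of the driver)
 * computing the number of NOPs the loop above writes:
 */
static inline u32 cik_sdma_ib_pad_count_sketch(u32 wptr)
{
	/* NOPs needed to advance wptr to the next (wptr & 7) == 4 slot */
	return (4 - (wptr & 7)) & 7;
}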

/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * and a DMA trap packet to generate an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
	/* flush HDP */
	/* We should be using the new POLL_REG_MEM special op packet here
	 * but it causes sDMA to hang sometimes
	 */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
	radeon_ring_write(ring, 0);
}
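
/*
 * The SRBM_WRITE sequence above is the recurring 3 DW idiom in this file:
 * a header with 0xf000 in the extra bits (presumably the byte-enable mask
 * for the register write), the destination register's dword offset
 * (byte offset >> 2), and finally the value to write.
 */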

/**
 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
	radeon_ring_write(ring, addr & 0xfffffff8);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);

	return true;
}
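
/*
 * Note the 0xfffffff8 mask above: the semaphore packet carries a 64-bit
 * GPU address whose low three bits are dropped, so the semaphore backing
 * storage must be 8-byte aligned.
 */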

/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
{
	u32 rb_cntl, reg_offset;
	int i;

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
		rb_cntl &= ~SDMA_RB_ENABLE;
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
	}
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
{
	/* XXX todo */
}

/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
{
	u32 me_cntl, reg_offset;
	int i;

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
		if (enable)
			me_cntl &= ~SDMA_HALT;
		else
			me_cntl |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
	}
}

/**
 * cik_sdma_gfx_resume - setup and start the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = SDMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = SDMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		if (rdev->wb.enabled)
			rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

		WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
		WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

		ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;

		/* enable DMA RB */
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

		ib_cntl = SDMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
		/* enable DMA IBs */
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * cik_sdma_rlc_resume - setup and start the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
{
	/* XXX todo */
	return 0;
}

/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->sdma_fw)
		return -EINVAL;

	/* stop the gfx rings and rlc compute queues */
	cik_sdma_gfx_stop(rdev);
	cik_sdma_rlc_stop(rdev);

	/* halt the MEs */
	cik_sdma_enable(rdev, false);

	/* sdma0 */
	fw_data = (const __be32 *)rdev->sdma_fw->data;
	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
	for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
	WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

	/* sdma1 */
	fw_data = (const __be32 *)rdev->sdma_fw->data;
	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
	for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
	WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
	return 0;
}
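
/*
 * Both engines are programmed from the same firmware image: fw_data is
 * rewound to rdev->sdma_fw->data before the SDMA1 upload, so SDMA0 and
 * SDMA1 run identical ucode, each tagged with CIK_SDMA_UCODE_VERSION.
 */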

/**
 * cik_sdma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_resume(struct radeon_device *rdev)
{
	int r;

	/* Reset dma */
	WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
	RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);
	RREG32(SRBM_SOFT_RESET);

	r = cik_sdma_load_microcode(rdev);
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(rdev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(rdev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(rdev);
	if (r)
		return r;

	return 0;
}
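
/*
 * Ordering matters above: cik_sdma_load_microcode() halts the MEs before
 * touching the ucode registers, the engines are only unhalted once the
 * upload is complete, and the rings are started last.
 */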

/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
 */
void cik_sdma_fini(struct radeon_device *rdev)
{
	/* stop the gfx rings and rlc compute queues */
	cik_sdma_gfx_stop(rdev);
	cik_sdma_rlc_stop(rdev);
	/* halt the MEs */
	cik_sdma_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
	/* XXX - compute dma queue tear down */
}

/**
 * cik_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int cik_copy_dma(struct radeon_device *rdev,
		 uint64_t src_offset, uint64_t dst_offset,
		 unsigned num_gpu_pages,
		 struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, cur_size_in_bytes);
		radeon_ring_write(ring, 0); /* src/dst endian swap */
		radeon_ring_write(ring, src_offset & 0xffffffff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}
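
/*
 * Illustrative only: a hypothetical caller of cik_copy_dma() (this sketch
 * is not part of the driver).  It mirrors what a synchronous user of the
 * asic copy callback would do: kick off the copy, then block on the
 * returned fence and drop the reference.
 */
static int __maybe_unused cik_copy_dma_sync_sketch(struct radeon_device *rdev,
						   uint64_t src, uint64_t dst,
						   unsigned num_gpu_pages)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = cik_copy_dma(rdev, src, dst, num_gpu_pages, &fence);
	if (r)
		return r;
	r = radeon_fence_wait(fence, false);	/* non-interruptible wait */
	radeon_fence_unref(&fence);
	return r;
}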

/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	u32 tmp;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ring_lock(rdev, ring, 4);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
	radeon_ring_write(ring, 1); /* number of DWs to follow */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
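
/*
 * The test above primes the scratch location with 0xCAFEDEAD and polls
 * for 0xDEADBEEF, so a stale read of the old value can never be mistaken
 * for a successful DMA write.
 */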

/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	int r;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	u32 tmp = 0;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	/* same linear WRITE packet as the ring test, submitted via an IB */
	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
	ib.ptr[3] = 1; /* number of DWs to follow */
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}

/**
 * cik_sdma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cik_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force ring activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * cik_sdma_vm_set_page - update the page tables using sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
void cik_sdma_vm_set_page(struct radeon_device *rdev,
			  struct radeon_ib *ib,
			  uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	trace_radeon_vm_set_page(pe, addr, count, incr, flags);

	if (flags & R600_PTE_SYSTEM) {
		while (count) {
			ndw = count * 2;
			if (ndw > 0xFFFFE)
				ndw = 0xFFFFE;

			/* for non-physically contiguous pages (system) */
			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
			ib->ptr[ib->length_dw++] = ndw;
			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
				value = radeon_vm_map_gart(rdev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
				addr += incr;
				value |= flags;
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}
		}
	} else {
		while (count) {
			ndw = count;
			if (ndw > 0x7FFFF)
				ndw = 0x7FFFF;

			if (flags & R600_PTE_VALID)
				value = addr;
			else
				value = 0;
			/* for physically contiguous pages (vram) */
			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
			ib->ptr[ib->length_dw++] = pe; /* dst addr */
			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
			ib->ptr[ib->length_dw++] = flags; /* mask */
			ib->ptr[ib->length_dw++] = 0;
			ib->ptr[ib->length_dw++] = value; /* value */
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
			ib->ptr[ib->length_dw++] = incr; /* increment size */
			ib->ptr[ib->length_dw++] = 0;
			ib->ptr[ib->length_dw++] = ndw; /* number of entries */
			pe += ndw * 8;
			addr += ndw * incr;
			count -= ndw;
		}
	}
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
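
/*
 * The NOP padding above keeps ib->length_dw a multiple of 8 DWs, matching
 * the alignment rule cik_sdma_ring_ib_execute() enforces on the ring.  In
 * both update paths the entry written is, in essence,
 * (page_address & ~0xFFFULL) | flags: built on the CPU for system pages,
 * generated by the GENERATE_PTE_PDE packet for contiguous vram.
 */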

/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 * @ridx: ring index to emit the flush packets on
 * @vm: radeon_vm pointer
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm->id < 8) {
		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
	} else {
		radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* update SH_MEM_* regs */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(vm->id));

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_BASES >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
	radeon_ring_write(ring, 1);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(0));

	/* flush HDP */
	/* We should be using the new POLL_REG_MEM special op packet here
	 * but it causes sDMA to hang sometimes
	 */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
	radeon_ring_write(ring, 0);

	/* flush TLB */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 1 << vm->id);
}