/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_ucode.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "cikd.h"
#define CIK_SDMA_UCODE_SIZE 1050
#define CIK_SDMA_UCODE_VERSION 64
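/* The legacy (headerless) firmware image is a raw array of
 * CIK_SDMA_UCODE_SIZE big-endian dwords; cik_sdma_load_microcode() below
 * streams it into the engine and writes the version dword afterwards.
 */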
u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP.  sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
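/* Rough flow of the hooks below (illustrative, not a new API): radeon's
 * generic ring helpers use cik_sdma_get_rptr()/cik_sdma_get_wptr() to track
 * free space, callers build packets with SDMA_PACKET() via
 * radeon_ring_write(), and cik_sdma_set_wptr() commits the new write
 * pointer to the engine.
 */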
/**
 * cik_sdma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr, reg;

	if (rdev->wb.enabled) {
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	} else {
		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
			reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
		else
			reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;

		rptr = RREG32(reg);
	}

	return (rptr & 0x3fffc) >> 2;
}
/**
 * cik_sdma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
	else
		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

	return (RREG32(reg) & 0x3fffc) >> 2;
}
/**
 * cik_sdma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
void cik_sdma_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
	else
		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
}
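/* Note: the driver tracks rptr/wptr in dwords while the RB_RPTR/RB_WPTR
 * registers hold byte offsets, hence the <<2 / >>2 conversions and the
 * 0x3fffc dword-aligned mask in the accessors above.
 */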
/**
 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
			      struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 5;
		while ((next_rptr & 7) != 4)
			next_rptr++;
		next_rptr += 4;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
		radeon_ring_write(ring, 1); /* number of DWs to follow */
		radeon_ring_write(ring, next_rptr);
	}

	/* IB packet must end on a 8 DW boundary */
	while ((ring->wptr & 7) != 4)
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
	radeon_ring_write(ring, ib->length_dw);
}
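/* The NOP padding above makes the 4-dword INDIRECT_BUFFER packet (header,
 * base lo, base hi, length) finish exactly on an 8-dword boundary, as the
 * comment in the function requires; e.g. with wptr % 8 == 1, three NOPs are
 * emitted before the packet.
 */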
/**
 * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ridx: radeon ring index
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
					 int ridx)
{
	struct radeon_ring *ring = &rdev->ring[ridx];
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ridx == R600_RING_TYPE_DMA_INDEX)
		ref_and_mask = SDMA0;
	else
		ref_and_mask = SDMA1;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
	radeon_ring_write(ring, ref_and_mask); /* reference */
	radeon_ring_write(ring, ref_and_mask); /* mask */
	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
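/* This POLL_REG_MEM form effectively writes ref_and_mask to
 * GPU_HDP_FLUSH_REQ and then polls GPU_HDP_FLUSH_DONE until the engine's
 * bit compares equal (EXTRA_FUNC(3) is the "==" compare), retrying up to
 * 0xfff times with a poll interval of 10.
 */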
/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and a DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, upper_32_bits(addr));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
	/* flush HDP */
	cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
}
/**
 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
	radeon_ring_write(ring, addr & 0xfffffff8);
	radeon_ring_write(ring, upper_32_bits(addr));

	return true;
}
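/* The semaphore address must be 8-byte aligned (the low bits are masked
 * off above); SDMA_SEMAPHORE_EXTRA_S selects a signal operation, and its
 * absence selects a wait.
 */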
/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
{
	u32 rb_cntl, reg_offset;
	int i;

	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
		rb_cntl &= ~SDMA_RB_ENABLE;
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
	}
	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}
/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
{
	/* XXX todo */
}
/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
{
	u32 me_cntl, reg_offset;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(rdev);
		cik_sdma_rlc_stop(rdev);
	}

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
		if (enable)
			me_cntl &= ~SDMA_HALT;
		else
			me_cntl |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
	}
}
/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = SDMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = SDMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		if (rdev->wb.enabled)
			rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

		WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
		WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

		/* enable DMA RB */
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

		ib_cntl = SDMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
		/* enable DMA IBs */
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
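/* Ring sizing example (illustrative): a 256KB ring holds 64K dwords, so
 * order_base_2(65536) == 16 is what the << 1 above programs into the
 * RB_CNTL size field starting at bit 1.
 */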
/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
{
	/* XXX todo */
	return 0;
}
/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
	int i;

	if (!rdev->sdma_fw)
		return -EINVAL;

	/* halt the MEs */
	cik_sdma_enable(rdev, false);

	if (rdev->new_fw) {
		const struct sdma_firmware_header_v1_0 *hdr =
			(const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data;
		const __le32 *fw_data;
		u32 fw_size;

		radeon_ucode_print_sdma_hdr(&hdr->header);

		/* sdma0 */
		fw_data = (const __le32 *)
			(rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
		for (i = 0; i < fw_size; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

		/* sdma1 */
		fw_data = (const __le32 *)
			(rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
		for (i = 0; i < fw_size; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
	} else {
		const __be32 *fw_data;

		/* sdma0 */
		fw_data = (const __be32 *)rdev->sdma_fw->data;
		WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
		for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

		/* sdma1 */
		fw_data = (const __be32 *)rdev->sdma_fw->data;
		WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
		for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
			WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
	}

	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
	return 0;
}
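/* Both engines are loaded from the same firmware image; the SDMA0_UCODE_*
 * register names plus SDMA1_REGISTER_OFFSET address the second engine's
 * instance of the same registers.
 */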
/**
 * cik_sdma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_resume(struct radeon_device *rdev)
{
	int r;

	r = cik_sdma_load_microcode(rdev);
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(rdev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(rdev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(rdev);
	if (r)
		return r;

	return 0;
}
/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
 */
void cik_sdma_fini(struct radeon_device *rdev)
{
	/* halt the MEs */
	cik_sdma_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
	/* XXX - compute dma queue tear down */
}
/**
 * cik_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
				  uint64_t src_offset, uint64_t dst_offset,
				  unsigned num_gpu_pages,
				  struct reservation_object *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, cur_size_in_bytes);
		radeon_ring_write(ring, 0); /* src/dst endian swap */
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(src_offset));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset));
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}
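/* Worked sizing (illustrative): each COPY packet moves at most 0x1fffff
 * bytes (just under 2MB) in 7 dwords, and the +14 dwords of headroom in the
 * ring lock cover the sync and fence packets emitted around the copy loop.
 */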
/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	unsigned index;
	u32 tmp;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ring_lock(rdev, ring, 5);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	radeon_ring_write(ring, lower_32_bits(gpu_addr));
	radeon_ring_write(ring, upper_32_bits(gpu_addr));
	radeon_ring_write(ring, 1); /* number of DWs to follow */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	int r;
	u32 tmp = 0;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}
/**
 * cik_sdma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cik_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}
/**
 * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
			    struct radeon_ib *ib,
			    uint64_t pe, uint64_t src,
			    unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}
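/* PTEs are 8 bytes each, so capping a single copy at 0x1FFFF8 bytes keeps
 * it both within the COPY packet's byte-count field and a whole number of
 * page table entries.
 */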
/**
 * cik_sdma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
void cik_sdma_vm_write_pages(struct radeon_device *rdev,
			     struct radeon_ib *ib,
			     uint64_t pe,
			     uint64_t addr, unsigned count,
			     uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				value = radeon_vm_map_gart(rdev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
			} else if (flags & R600_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}
/**
 * cik_sdma_vm_set_pages - update the page tables using sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
void cik_sdma_vm_set_pages(struct radeon_device *rdev,
			   struct radeon_ib *ib,
			   uint64_t pe,
			   uint64_t addr, unsigned count,
			   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}
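/* GENERATE_PTE_PDE offloads the write loop to the engine: given a starting
 * value, a per-entry increment and an entry count, it generates ndw
 * consecutive PTEs without the CPU touching each one.
 */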
/**
 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 */
void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		      unsigned vm_id, uint64_t pd_addr)
{
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm_id < 8) {
		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
	} else {
		radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, pd_addr >> 12);

	/* update SH_MEM_* regs */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(vm_id));

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_BASES >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
	radeon_ring_write(ring, 1);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(0));

	/* flush HDP */
	cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);

	/* flush TLB */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 1 << vm_id);
}