/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "iceland_sdma_pkt_open.h"

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
        SDMA0_REGISTER_OFFSET,
        SDMA1_REGISTER_OFFSET
};

static const u32 golden_settings_iceland_a11[] =
{
        mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
        mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
        mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
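
/*
 * Both tables above are consumed by amdgpu_program_register_sequence() as
 * {register offset, AND mask, OR value} triples: the masked bits are cleared
 * and the OR value is written in (or the value is written directly when the
 * mask is 0xffffffff).
 */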

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */

static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                amdgpu_program_register_sequence(adev,
                                                 iceland_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
                amdgpu_program_register_sequence(adev,
                                                 golden_settings_iceland_a11,
                                                 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
                break;
        default:
                break;
        }
}

static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
{
        int i;
        for (i = 0; i < adev->sdma.num_instances; i++) {
                release_firmware(adev->sdma.instance[i].fw);
                adev->sdma.instance[i].fw = NULL;
        }
}

/**
 * sdma_v2_4_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err = 0, i;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct sdma_firmware_header_v1_0 *hdr;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                chip_name = "topaz";
                break;
        default: BUG();
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                if (i == 0)
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
                else
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
                err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
                if (err)
                        goto out;
                err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
                if (err)
                        goto out;
                hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
                adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
                adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
                if (adev->sdma.instance[i].feature_version >= 20)
                        adev->sdma.instance[i].burst_nop = true;

                if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
                        info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
                        info->fw = adev->sdma.instance[i].fw;
                        header = (const struct common_firmware_header *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
                }
        }

out:
        if (err) {
                pr_err("sdma_v2_4: Failed to load firmware \"%s\"\n", fw_name);
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        release_firmware(adev->sdma.instance[i].fw);
                        adev->sdma.instance[i].fw = NULL;
                }
        }
        return err;
}

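/*
 * The GFX ring rptr/wptr registers and the rptr writeback slot hold byte
 * offsets, while the ring helpers below work in dwords, hence the >> 2 and
 * << 2 conversions.
 */
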
/**
 * sdma_v2_4_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
{
        /* XXX check if swapping is necessary on BE */
        return ring->adev->wb.wb[ring->rptr_offs] >> 2;
}

/**
 * sdma_v2_4_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
        u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;

        return wptr;
}

/**
 * sdma_v2_4_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
}

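/*
 * With firmware that supports burst NOPs (feature_version >= 20, see
 * sdma_v2_4_init_microcode()), the first NOP packet carries a count of the
 * padding dwords that follow, letting the engine skip the whole run at once
 * instead of fetching each NOP individually.
 */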
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
        int i;

        for (i = 0; i < count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        amdgpu_ring_write(ring, ring->funcs->nop |
                                          SDMA_PKT_NOP_HEADER_COUNT(count - 1));
                else
                        amdgpu_ring_write(ring, ring->funcs->nop);
}

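/*
 * The INDIRECT_BUFFER packet emitted below is 6 dwords long; padding with
 * (10 - (wptr & 7)) % 8 NOPs leaves the write pointer at 2 mod 8, so the
 * packet ends exactly on the 8-dword boundary the engine requires.
 */
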
/**
 * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_ib *ib,
                                   unsigned vm_id, bool ctx_switch)
{
        u32 vmid = vm_id & 0xf;

        /* IB packet must end on a 8 DW boundary */
        sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
                          SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
        /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0);

}

/**
 * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        u32 ref_and_mask = 0;

        if (ring == &ring->adev->sdma.instance[0].ring)
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
        else
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
        amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
        amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
        amdgpu_ring_write(ring, ref_and_mask); /* reference */
        amdgpu_ring_write(ring, ref_and_mask); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static void sdma_v2_4_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
        amdgpu_ring_write(ring, mmHDP_DEBUG0);
        amdgpu_ring_write(ring, 1);
}
/**
 * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address the fence value is written to
 * @seq: fence sequence number to write
 * @flags: fence flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                      unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        /* write the fence */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, lower_32_bits(seq));

        /* optionally write high bits as well */
        if (write64bit) {
                addr += 4;
                amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
                amdgpu_ring_write(ring, lower_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(seq));
        }

        /* generate an interrupt */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
        amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v2_4_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
        struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
        struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;

        if ((adev->mman.buffer_funcs_ring == sdma0) ||
            (adev->mman.buffer_funcs_ring == sdma1))
                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
                ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
        }
        sdma0->ready = false;
        sdma1->ready = false;
}

/**
 * sdma_v2_4_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
{
        /* XXX todo */
}

/**
 * sdma_v2_4_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl;
        int i;

        if (!enable) {
                sdma_v2_4_gfx_stop(adev);
                sdma_v2_4_rlc_stop(adev);
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
                if (enable)
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
                else
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
                WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
        }
}

/**
 * sdma_v2_4_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 wb_offset;
        int i, j, r;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                wb_offset = (ring->rptr_offs * 4);

                mutex_lock(&adev->srbm_mutex);
                for (j = 0; j < 16; j++) {
                        vi_srbm_select(adev, 0, 0, 0, j);
                        /* SDMA GFX */
                        WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
                        WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
                }
                vi_srbm_select(adev, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);

                WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
                       adev->gfx.config.gb_addr_config & 0x70);

                WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
                                        RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
                       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
                       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

                WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
                WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

                ring->wptr = 0;
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);

                /* enable DMA RB */
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

                ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

                ring->ready = true;
        }

        sdma_v2_4_enable(adev, true);
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
                        return r;
                }

                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
        }

        return 0;
}

/**
 * sdma_v2_4_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
{
        /* XXX todo */
        return 0;
}

/**
 * sdma_v2_4_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
{
        const struct sdma_firmware_header_v1_0 *hdr;
        const __le32 *fw_data;
        u32 fw_size;
        int i, j;

        /* halt the MEs */
        sdma_v2_4_enable(adev, false);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                if (!adev->sdma.instance[i].fw)
                        return -EINVAL;
                hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
                fw_data = (const __le32 *)
                        (adev->sdma.instance[i].fw->data +
                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
                for (j = 0; j < fw_size; j++)
                        WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
        }

        return 0;
}

/**
 * sdma_v2_4_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_start(struct amdgpu_device *adev)
{
        int r;

        if (!adev->pp_enabled) {
                if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
                        r = sdma_v2_4_load_microcode(adev);
                        if (r)
                                return r;
                } else {
                        r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
                                                        AMDGPU_UCODE_ID_SDMA0);
                        if (r)
                                return -EINVAL;
                        r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
                                                        AMDGPU_UCODE_ID_SDMA1);
                        if (r)
                                return -EINVAL;
                }
        }

        /* halt the engine before programming */
        sdma_v2_4_enable(adev, false);

        /* start the gfx rings and rlc compute queues */
        r = sdma_v2_4_gfx_resume(adev);
        if (r)
                return r;
        r = sdma_v2_4_rlc_resume(adev);
        if (r)
                return r;

        return 0;
}

/**
 * sdma_v2_4_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned i;
        unsigned index;
        int r;
        u32 tmp;
        u64 gpu_addr;

        r = amdgpu_wb_get(adev, &index);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
                return r;
        }

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);

        r = amdgpu_ring_alloc(ring, 5);
        if (r) {
                DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
                amdgpu_wb_free(adev, index);
                return r;
        }

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                          SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
        amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        amdgpu_wb_free(adev, index);

        return r;
}

/**
 * sdma_v2_4_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
        unsigned index;
        u32 tmp = 0;
        u64 gpu_addr;
        long r;

        r = amdgpu_wb_get(adev, &index);
        if (r) {
                dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
                return r;
        }

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err0;
        }

        ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
        ib.ptr[1] = lower_32_bits(gpu_addr);
        ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
        ib.ptr[4] = 0xDEADBEEF;
        ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
        ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
        ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
        ib.length_dw = 8;

        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err1;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out\n");
                r = -ETIMEDOUT;
                goto err1;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err1;
        }
        tmp = le32_to_cpu(adev->wb.wb[index]);
        if (tmp == 0xDEADBEEF) {
                DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        } else {
                DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }

err1:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
err0:
        amdgpu_wb_free(adev, index);
        return r;
}

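/*
 * Each GPU page table entry is 8 bytes (two dwords): vm_copy_pte() moves
 * count * 8 bytes with a 7-dword COPY_LINEAR packet (matching
 * copy_pte_num_dw in sdma_v2_4_vm_pte_funcs below), and vm_write_pte()
 * emits count * 2 payload dwords in a single WRITE_LINEAR packet.
 */
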
/**
 * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
                                  uint64_t pe, uint64_t src,
                                  unsigned count)
{
        unsigned bytes = count * 8;

        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
        ib->ptr[ib->length_dw++] = bytes;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src);
        ib->ptr[ib->length_dw++] = upper_32_bits(src);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v2_4_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
                                   uint64_t value, unsigned count,
                                   uint32_t incr)
{
        unsigned ndw = count * 2;

        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
        ib->ptr[ib->length_dw++] = pe;
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = ndw;
        for (; ndw > 0; ndw -= 2) {
                ib->ptr[ib->length_dw++] = lower_32_bits(value);
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                value += incr;
        }
}

/**
 * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
                                     uint64_t addr, unsigned count,
                                     uint32_t incr, uint64_t flags)
{
        /* for physically contiguous pages (vram) */
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
        ib->ptr[ib->length_dw++] = upper_32_bits(flags);
        ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = incr; /* increment size */
        ib->ptr[ib->length_dw++] = 0;
        ib->ptr[ib->length_dw++] = count; /* number of entries */
}

/**
 * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 */
static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
        u32 pad_count;
        int i;

        pad_count = (8 - (ib->length_dw & 0x7)) % 8;
        for (i = 0; i < pad_count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        ib->ptr[ib->length_dw++] =
                                SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
                                SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
                else
                        ib->ptr[ib->length_dw++] =
                                SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (CIK).
 */
static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        /* wait for idle */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
                          SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
        amdgpu_ring_write(ring, seq); /* reference */
        amdgpu_ring_write(ring, 0xfffffff); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

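/*
 * The VM flush below is three packets: an SRBM_WRITE of the page directory
 * base into the per-VMID VM_CONTEXT*_PAGE_TABLE_BASE_ADDR register, an
 * SRBM_WRITE to VM_INVALIDATE_REQUEST to trigger the TLB flush for that
 * VMID, and a POLL_REGMEM read-back of VM_INVALIDATE_REQUEST so later
 * packets run only after the flush has been issued.
 */
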
/**
 * sdma_v2_4_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VMID to flush
 * @pd_addr: page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vm_id, uint64_t pd_addr)
{
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
        if (vm_id < 8) {
                amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
        } else {
                amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
        }
        amdgpu_ring_write(ring, pd_addr >> 12);

        /* flush TLB */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
        amdgpu_ring_write(ring, 1 << vm_id);

        /* wait for flush */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0); /* reference */
        amdgpu_ring_write(ring, 0); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static int sdma_v2_4_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->sdma.num_instances = SDMA_MAX_INSTANCE;

        sdma_v2_4_set_ring_funcs(adev);
        sdma_v2_4_set_buffer_funcs(adev);
        sdma_v2_4_set_vm_pte_funcs(adev);
        sdma_v2_4_set_irq_funcs(adev);

        return 0;
}

static int sdma_v2_4_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* SDMA trap event */
        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
                              &adev->sdma.trap_irq);
        if (r)
                return r;

        /* SDMA Privileged inst */
        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;

        /* SDMA Privileged inst */
        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;

        r = sdma_v2_4_init_microcode(adev);
        if (r) {
                DRM_ERROR("Failed to load sdma firmware!\n");
                return r;
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                ring->ring_obj = NULL;
                ring->use_doorbell = false;
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_TRAP0 :
                                     AMDGPU_SDMA_IRQ_TRAP1);
                if (r)
                        return r;
        }

        return r;
}

static int sdma_v2_4_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);

        sdma_v2_4_free_microcode(adev);
        return 0;
}

static int sdma_v2_4_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        sdma_v2_4_init_golden_registers(adev);

        r = sdma_v2_4_start(adev);
        if (r)
                return r;

        return r;
}

static int sdma_v2_4_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        sdma_v2_4_enable(adev, false);

        return 0;
}

static int sdma_v2_4_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return sdma_v2_4_hw_fini(adev);
}

static int sdma_v2_4_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return sdma_v2_4_hw_init(adev);
}

static bool sdma_v2_4_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS2);

        if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
                   SRBM_STATUS2__SDMA1_BUSY_MASK))
                return false;

        return true;
}

static int sdma_v2_4_wait_for_idle(void *handle)
{
        unsigned i;
        u32 tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
                                SRBM_STATUS2__SDMA1_BUSY_MASK);

                if (!tmp)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

static int sdma_v2_4_soft_reset(void *handle)
{
        u32 srbm_soft_reset = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS2);

        if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
                /* sdma0 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
                WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
        }
        if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
                /* sdma1 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
                WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
        }

        if (srbm_soft_reset) {
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);
        }

        return 0;
}

static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *src,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        u32 sdma_cntl;

        switch (type) {
        case AMDGPU_SDMA_IRQ_TRAP0:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
                        WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
                        WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        case AMDGPU_SDMA_IRQ_TRAP1:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
                        WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
                        WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }
        return 0;
}

static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        u8 instance_id, queue_id;

        instance_id = (entry->ring_id & 0x3) >> 0;
        queue_id = (entry->ring_id & 0xc) >> 2;
        DRM_DEBUG("IH: SDMA trap\n");
        switch (instance_id) {
        case 0:
                switch (queue_id) {
                case 0:
                        amdgpu_fence_process(&adev->sdma.instance[0].ring);
                        break;
                case 1:
                        /* XXX compute */
                        break;
                case 2:
                        /* XXX compute */
                        break;
                }
                break;
        case 1:
                switch (queue_id) {
                case 0:
                        amdgpu_fence_process(&adev->sdma.instance[1].ring);
                        break;
                case 1:
                        /* XXX compute */
                        break;
                case 2:
                        /* XXX compute */
                        break;
                }
                break;
        }
        return 0;
}

static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *source,
                                              struct amdgpu_iv_entry *entry)
{
        DRM_ERROR("Illegal instruction in SDMA command stream\n");
        schedule_work(&adev->reset_work);
        return 0;
}

static int sdma_v2_4_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
{
        /* XXX handled via the smc on VI */
        return 0;
}

static int sdma_v2_4_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
{
        return 0;
}

static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
        .name = "sdma_v2_4",
        .early_init = sdma_v2_4_early_init,
        .late_init = NULL,
        .sw_init = sdma_v2_4_sw_init,
        .sw_fini = sdma_v2_4_sw_fini,
        .hw_init = sdma_v2_4_hw_init,
        .hw_fini = sdma_v2_4_hw_fini,
        .suspend = sdma_v2_4_suspend,
        .resume = sdma_v2_4_resume,
        .is_idle = sdma_v2_4_is_idle,
        .wait_for_idle = sdma_v2_4_wait_for_idle,
        .soft_reset = sdma_v2_4_soft_reset,
        .set_clockgating_state = sdma_v2_4_set_clockgating_state,
        .set_powergating_state = sdma_v2_4_set_powergating_state,
};

static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
        .type = AMDGPU_RING_TYPE_SDMA,
        .align_mask = 0xf,
        .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
        .support_64bit_ptrs = false,
        .get_rptr = sdma_v2_4_ring_get_rptr,
        .get_wptr = sdma_v2_4_ring_get_wptr,
        .set_wptr = sdma_v2_4_ring_set_wptr,
        .emit_frame_size =
                6 + /* sdma_v2_4_ring_emit_hdp_flush */
                3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
                6 + /* sdma_v2_4_ring_emit_pipeline_sync */
                12 + /* sdma_v2_4_ring_emit_vm_flush */
                10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
        .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
        .emit_ib = sdma_v2_4_ring_emit_ib,
        .emit_fence = sdma_v2_4_ring_emit_fence,
        .emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
        .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
        .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
        .emit_hdp_invalidate = sdma_v2_4_ring_emit_hdp_invalidate,
        .test_ring = sdma_v2_4_ring_test_ring,
        .test_ib = sdma_v2_4_ring_test_ib,
        .insert_nop = sdma_v2_4_ring_insert_nop,
        .pad_ib = sdma_v2_4_ring_pad_ib,
};

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++)
                adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
}

static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
        .set = sdma_v2_4_set_trap_irq_state,
        .process = sdma_v2_4_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
        .process = sdma_v2_4_process_illegal_inst_irq,
};

static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
        adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
        adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
}

/**
 * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
                                       uint32_t byte_count)
{
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
        ib->ptr[ib->length_dw++] = byte_count;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
        ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
        ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
        ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
                                       uint32_t src_data,
                                       uint64_t dst_offset,
                                       uint32_t byte_count)
{
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
        ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
        ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
        ib->ptr[ib->length_dw++] = src_data;
        ib->ptr[ib->length_dw++] = byte_count;
}

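/*
 * copy_max_bytes/fill_max_bytes reflect the 0x1fffff byte-count limit of a
 * single COPY_LINEAR/CONST_FILL packet, while copy_num_dw/fill_num_dw are
 * the worst-case dwords the scheduler reserves per emit_copy_buffer() /
 * emit_fill_buffer() call.
 */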
static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
        .copy_max_bytes = 0x1fffff,
        .copy_num_dw = 7,
        .emit_copy_buffer = sdma_v2_4_emit_copy_buffer,

        .fill_max_bytes = 0x1fffff,
        .fill_num_dw = 7,
        .emit_fill_buffer = sdma_v2_4_emit_fill_buffer,
};

static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
{
        if (adev->mman.buffer_funcs == NULL) {
                adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
                adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
        }
}

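/*
 * copy_pte_num_dw and set_pte_pde_num_dw are the dwords each helper adds to
 * an IB per call (7 for the COPY_LINEAR header, 10 for GEN_PTEPDE);
 * set_max_nums_pte_pde presumably expresses the same 0x1fffff byte limit as
 * a number of 8-byte entries.
 */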
static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
        .copy_pte_num_dw = 7,
        .copy_pte = sdma_v2_4_vm_copy_pte,

        .write_pte = sdma_v2_4_vm_write_pte,

        .set_max_nums_pte_pde = 0x1fffff >> 3,
        .set_pte_pde_num_dw = 10,
        .set_pte_pde = sdma_v2_4_vm_set_pte_pde,
};

static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{
        unsigned i;

        if (adev->vm_manager.vm_pte_funcs == NULL) {
                adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
                for (i = 0; i < adev->sdma.num_instances; i++)
                        adev->vm_manager.vm_pte_rings[i] =
                                &adev->sdma.instance[i].ring;

                adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
        }
}

const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SDMA,
        .major = 2,
        .minor = 4,
        .rev = 0,
        .funcs = &sdma_v2_4_ip_funcs,
};