/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"

#include "vega10/soc15ip.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"
#include "vega10/HDP/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"
#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
#define GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH 34

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
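/*
 * Per-VMID GDS register offsets: memory base/size plus the GWS and OA
 * allocation registers for each of the 16 VMIDs.
 */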
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15)}
};
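/*
 * Golden register settings, consumed by amdgpu_program_register_sequence()
 * as {offset, and_mask, or_value} triplets: the bits covered by and_mask
 * are cleared and or_value is OR'd in (an and_mask of 0xffffffff simply
 * writes or_value).
 */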
static const u32 golden_settings_gc_9_0[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
	SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
	SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000003ff,
	SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
};
static const u32 golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0x0000f000, 0x00012107,
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x2a114042,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x2a114042,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0x00008000, 0x00048000,
	SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x00001800, 0x00000800
};
static const u32 golden_settings_gc_9_1[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0xfffdf3cf, 0x00014104,
	SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
	SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0x00003120,
	SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000000ff,
	SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
};
static const u32 golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x24000042,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x24000042,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0xffffffff, 0x04048000,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_MODE_CNTL_1), 0x06000000, 0x06000000,
	SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
};
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_0,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_0));
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_0_vg10,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_1,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_1));
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_1_rv1,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 7;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
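/*
 * Emit a WRITE_DATA packet that writes @val to the register @reg from the
 * engine selected by @eng_sel; @wc requests a write confirmation.
 */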
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
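/*
 * Emit a WAIT_REG_MEM packet: the selected engine polls the register or
 * memory location every @inv cycles until (value & @mask) == @ref
 * (function 3 is "equal").
 */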
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
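/*
 * Ring smoke test: push a magic value into a scratch register through the
 * ring and poll until it reads back (or the usec timeout expires).
 */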
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
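/* Same scratch-register smoke test as above, but submitted through an IB. */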
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}
	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
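/*
 * Fetch and validate the gfx9 CP/RLC firmware images for the detected ASIC;
 * for PSP-based loading, each ucode is also registered in
 * adev->firmware.ucode[] so the PSP can upload it.
 */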
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	default:
		BUG();
	}
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}
	}
549 "gfx9: Failed to load firmware \"%s\"\n",
551 release_firmware(adev
->gfx
.pfp_fw
);
552 adev
->gfx
.pfp_fw
= NULL
;
553 release_firmware(adev
->gfx
.me_fw
);
554 adev
->gfx
.me_fw
= NULL
;
555 release_firmware(adev
->gfx
.ce_fw
);
556 adev
->gfx
.ce_fw
= NULL
;
557 release_firmware(adev
->gfx
.rlc_fw
);
558 adev
->gfx
.rlc_fw
= NULL
;
559 release_firmware(adev
->gfx
.mec_fw
);
560 adev
->gfx
.mec_fw
= NULL
;
561 release_firmware(adev
->gfx
.mec2_fw
);
562 adev
->gfx
.mec2_fw
= NULL
;
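/* Size, in dwords, of the clear-state buffer filled by gfx_v9_0_get_csb_buffer(). */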
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
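/*
 * LBPW (likely "load balancing per watt") setup: programs the RLC
 * load-balancer thresholds and CU masks. Only invoked on Raven from
 * gfx_v9_0_rlc_init().
 */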
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
	WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);

	/* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);
}
static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}
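/*
 * Copy the CP jump tables out of the five firmware images (CE, PFP, ME,
 * MEC, MEC2) into the RLC-owned cp_table buffer on Raven.
 */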
static void rv_init_cp_jump_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 5;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}
static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}
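/*
 * Allocate the RLC clear-state buffer in VRAM and fill it; on Raven, also
 * allocate the CP table BO, copy in the jump tables and set up LBPW.
 */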
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx9_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
		if (adev->gfx.rlc.clear_state_obj == NULL) {
			r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_VRAM,
						&adev->gfx.rlc.clear_state_obj,
						&adev->gfx.rlc.clear_state_gpu_addr,
						(void **)&adev->gfx.rlc.cs_ptr);
			if (r) {
				dev_err(adev->dev,
					"(%d) failed to create rlc csb bo\n", r);
				gfx_v9_0_rlc_fini(adev);
				return r;
			}
		}
		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v9_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->asic_type == CHIP_RAVEN) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		if (adev->gfx.rlc.cp_table_obj == NULL) {
			r = amdgpu_bo_create_kernel(adev, adev->gfx.rlc.cp_table_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
						&adev->gfx.rlc.cp_table_obj,
						&adev->gfx.rlc.cp_table_gpu_addr,
						(void **)&adev->gfx.rlc.cp_table_ptr);
			if (r) {
				dev_err(adev->dev,
					"(%d) failed to create cp table bo\n", r);
				gfx_v9_0_rlc_fini(adev);
				return r;
			}
		}

		rv_init_cp_jump_table(adev);
		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

		gfx_v9_0_init_lbpw(adev);
	}

	return 0;
}
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->gfx.mec.hpd_eop_obj) {
		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
		adev->gfx.mec.hpd_eop_obj = NULL;
	}
	if (adev->gfx.mec.mec_fw_obj) {
		r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

		amdgpu_bo_unref(&adev->gfx.mec.mec_fw_obj);
		adev->gfx.mec.mec_fw_obj = NULL;
	}
}
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

	if (adev->gfx.mec.hpd_eop_obj == NULL) {
		r = amdgpu_bo_create(adev,
				     mec_hpd_size,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->gfx.mec.hpd_eop_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
	if (unlikely(r != 0)) {
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
			  &adev->gfx.mec.hpd_eop_gpu_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	if (adev->gfx.mec.mec_fw_obj == NULL) {
		r = amdgpu_bo_create(adev,
				     mec_hdr->header.ucode_size_bytes,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->gfx.mec.mec_fw_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false);
	if (unlikely(r != 0)) {
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, AMDGPU_GEM_DOMAIN_GTT,
			  &adev->gfx.mec.mec_fw_gpu_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) map firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}
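/*
 * SQ indexed-register read: select the wave/SIMD/offset via SQ_IND_INDEX,
 * then fetch the value through SQ_IND_DATA.
 */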
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}
static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
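/* Callbacks used by common gfx code (e.g. the debugfs wave reader). */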
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_0_select_se_sh,
	.read_wave_data = &gfx_v9_0_read_wave_data,
	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_RAVEN:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));
}
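/*
 * NGG (next-generation geometry) buffer setup: one VRAM allocation per
 * buffer type, sized per shader engine; a zero module-parameter size
 * falls back to the caller-supplied default.
 */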
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
				   struct amdgpu_ngg_buf *ngg_buf,
				   int size_se,
				   int default_size_se)
{
	int r;

	if (size_se < 0) {
		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
		return -EINVAL;
	}
	size_se = size_se ? size_se : default_size_se;

	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				    &ngg_buf->bo,
				    &ngg_buf->gpu_addr,
				    NULL);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
		return r;
	}
	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

	return r;
}
static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < NGG_BUF_MAX; i++)
		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
				      &adev->gfx.ngg.buf[i].gpu_addr,
				      NULL);

	memset(&adev->gfx.ngg.buf[0], 0,
			sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

	adev->gfx.ngg.init = false;

	return 0;
}
static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_ngg || adev->gfx.ngg.init == true)
		return 0;

	/* GDS reserve memory: 64 bytes alignment */
	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gfx.ngg.gds_reserve_addr = amdgpu_gds_reg_offset[0].mem_base;
	adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;

	/* Primitive Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
				    amdgpu_prim_buf_per_se,
				    64 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
		goto err;
	}

	/* Position Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
				    amdgpu_pos_buf_per_se,
				    256 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Position Buffer\n");
		goto err;
	}

	/* Control Sideband */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
				    amdgpu_cntl_sb_buf_per_se,
				    256);
	if (r) {
		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
		goto err;
	}

	/* Parameter Cache, not created by default */
	if (amdgpu_param_buf_per_se <= 0)
		goto out;

	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
				    amdgpu_param_buf_per_se,
				    512 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Parameter Cache\n");
		goto err;
	}

out:
	adev->gfx.ngg.init = true;
	return 0;
err:
	gfx_v9_0_ngg_fini(adev);
	return r;
}
static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	int r;
	u32 data, base, size;

	if (!amdgpu_ngg)
		return 0;

	/* Program buffer size */
	data = 0;
	size = adev->gfx.ngg.buf[NGG_PRIM].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size);

	size = adev->gfx.ngg.buf[NGG_POS].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size);

	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);

	data = 0;
	size = adev->gfx.ngg.buf[NGG_CNTL].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size);

	size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size);

	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);

	/* Program buffer base address */
	base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);

	/* Clear GDS reserved memory */
	r = amdgpu_ring_alloc(ring, 17);
	if (r) {
		DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[0].mem_size,
				   (adev->gds.mem.total_size +
				    adev->gfx.ngg.gds_reserve_size) >>
				   AMDGPU_GDS_SHIFT);

	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
				PACKET3_DMA_DATA_SRC_SEL(2)));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_size);

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[0].mem_size, 0);

	amdgpu_ring_commit(ring);

	return 0;
}
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				      int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX9_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}
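/*
 * sw_init: software-side setup - IRQ sources, firmware, RLC/MEC buffers,
 * the gfx ring, compute rings (allocated horizontally across pipes), KIQ,
 * MQDs, and the GDS/GWS/OA reservations.
 */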
static int gfx_v9_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id;
	struct amdgpu_ring *ring;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->gfx.mec.num_mec = 2;
		break;
	case CHIP_RAVEN:
	default:
		adev->gfx.mec.num_mec = 1;
		break;
	}

	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 184,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 185,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v9_0_scratch_init(adev);

	r = gfx_v9_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = gfx_v9_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		ring->use_doorbell = true;
		ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
		if (r)
			return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v9_0_compute_ring_init(adev,
							       ring_id,
							       i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	kiq = &adev->gfx.kiq;
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	/* create MQD for all compute queues as well as KIQ for SRIOV case */
	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd));
	if (r)
		return r;

	/* reserve GDS, GWS and OA resource for gfx */
	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
				    &adev->gds.gds_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
				    &adev->gds.gws_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
				    &adev->gds.oa_gfx_bo, NULL, NULL);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	gfx_v9_0_gpu_early_init(adev);

	r = gfx_v9_0_ngg_init(adev);
	if (r)
		return r;

	return 0;
}
static int gfx_v9_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_compute_mqd_sw_fini(adev);
	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
	amdgpu_gfx_kiq_fini(adev);

	gfx_v9_0_mec_fini(adev);
	gfx_v9_0_ngg_fini(adev);

	return 0;
}
static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}
static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}
static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v9_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}
static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v9_0_tiling_mode_table_init(adev);

	gfx_v9_0_setup_rb(adev);
	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < 16; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		tmp = 0;
		tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
				    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_0_init_compute_vmid(adev);

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcasted
	 * to all the shaders
	 */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_prim_fifo_size_backend <<
			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
	mutex_unlock(&adev->grbm_idx_mutex);
}
static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}
static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
{
	/* csib */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
			adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
			adev->gfx.rlc.clear_state_size);
}
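/*
 * Walk the RLC register-list-format blob, recording where each indirect
 * entry starts and collecting the set of unique indirect register offsets.
 */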
static void gfx_v9_0_parse_ind_reg_list(int *register_list_format,
				int indirect_offset,
				int list_size,
				int *unique_indirect_regs,
				int *unique_indirect_reg_count,
				int max_indirect_reg_count,
				int *indirect_start_offsets,
				int *indirect_start_offsets_count,
				int max_indirect_start_offsets_count)
{
	int idx;
	bool new_entry = true;

	for (; indirect_offset < list_size; indirect_offset++) {

		if (new_entry) {
			new_entry = false;
			indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
			*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
			BUG_ON(*indirect_start_offsets_count >= max_indirect_start_offsets_count);
		}

		if (register_list_format[indirect_offset] == 0xFFFFFFFF) {
			new_entry = true;
			continue;
		}

		indirect_offset += 2;

		/* look for the matching indice */
		for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
			if (unique_indirect_regs[idx] ==
				register_list_format[indirect_offset])
				break;
		}

		if (idx >= *unique_indirect_reg_count) {
			unique_indirect_regs[*unique_indirect_reg_count] =
				register_list_format[indirect_offset];
			idx = *unique_indirect_reg_count;
			*unique_indirect_reg_count = *unique_indirect_reg_count + 1;
			BUG_ON(*unique_indirect_reg_count >= max_indirect_reg_count);
		}

		register_list_format[indirect_offset] = idx;
	}
}
static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
{
	int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
	int unique_indirect_reg_count = 0;

	int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
	int indirect_start_offsets_count = 0;

	int list_size = 0;
	int i = 0;
	u32 tmp = 0;

	u32 *register_list_format =
		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
	if (!register_list_format)
		return -ENOMEM;
	memcpy(register_list_format, adev->gfx.rlc.register_list_format,
		adev->gfx.rlc.reg_list_format_size_bytes);

	/* setup unique_indirect_regs array and indirect_start_offsets array */
	gfx_v9_0_parse_ind_reg_list(register_list_format,
				GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH,
				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
				unique_indirect_regs,
				&unique_indirect_reg_count,
				sizeof(unique_indirect_regs)/sizeof(int),
				indirect_start_offsets,
				&indirect_start_offsets_count,
				sizeof(indirect_start_offsets)/sizeof(int));

	/* enable auto inc in case it is disabled */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);

	/* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
		RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
			adev->gfx.rlc.register_restore[i]);

	/* load direct register */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 0);
	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
			adev->gfx.rlc.register_restore[i]);

	/* load indirect register */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
		adev->gfx.rlc.reg_list_format_start);
	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
			register_list_format[i]);

	/* set save/restore list size */
	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
	list_size = list_size >> 1;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
		adev->gfx.rlc.reg_restore_list_size);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);

	/* write the starting offsets to RLC scratch ram */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
		adev->gfx.rlc.starting_offsets_start);
	for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
			indirect_start_offsets[i]);

	/* load unique indirect regs*/
	for (i = 0; i < sizeof(unique_indirect_regs)/sizeof(int); i++) {
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
			unique_indirect_regs[i] & 0x3FFFF);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
			unique_indirect_regs[i] >> 20);
	}

	kfree(register_list_format);
	return 0;
}
static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
	u32 tmp = 0;

	/* enable the Save Restore Machine */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
}
static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t data = 0;
	uint32_t default_data = 0;

	default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));

	if (enable == true) {
		/* enable GFXIP control over CGPG */
		data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);

		/* update gfxoff status */
		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
		data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
	} else {
		/* restore GFXIP control over CGPG */
		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
	}
}
static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG)) {
		/* init IDLE_POLL_COUNT = 60 */
		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
		data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);

		/* init RLC PG Delay */
		data = 0;
		data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
		data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
		data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
		data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);

		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
		data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
		data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);

		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
		data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
		data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);

		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
		data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;

		/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);

		pwr_10_0_gfxip_control_over_cgpg(adev, true);
	}
}
static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data = 0;
	uint32_t default_data = 0;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));

	if (enable == true) {
		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
	} else {
		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
	}
}

static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t data = 0;
	uint32_t default_data = 0;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));

	if (enable == true) {
		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
	} else {
		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
	}
}

static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
					    bool enable)
{
	uint32_t data = 0;
	uint32_t default_data = 0;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));

	if (enable == true) {
		data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
	} else {
		data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
	}
}

static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	uint32_t data, default_data;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
	if (enable == true)
		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
	if (default_data != data)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data, default_data;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
	if (enable == true)
		data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
	if (default_data != data)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);

	if (!enable)
		/* read any GFX register to wake up GFX */
		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
}

static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t data, default_data;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
	if (enable == true)
		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
	if (default_data != data)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
							 bool enable)
{
	uint32_t data, default_data;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
	if (enable == true)
		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
	if (default_data != data)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
{
	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v9_0_init_csb(adev);
		gfx_v9_0_init_rlc_save_restore_list(adev);
		gfx_v9_0_enable_save_restore_machine(adev);

		if (adev->asic_type == CHIP_RAVEN) {
			WREG32(mmRLC_JUMP_TABLE_RESTORE,
				adev->gfx.rlc.cp_table_gpu_addr >> 8);
			gfx_v9_0_init_gfx_power_gating(adev);

			if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
				gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
				gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
			} else {
				gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
				gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
			}

			if (adev->pg_flags & AMD_PG_SUPPORT_CP)
				gfx_v9_0_enable_cp_power_gating(adev, true);
			else
				gfx_v9_0_enable_cp_power_gating(adev, false);
		}
	}
}
void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);

	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
	WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);

	gfx_v9_0_enable_gui_idle_interrupt(adev, false);

	gfx_v9_0_wait_for_rlc_serdes(adev);
}

static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
	u32 rlc_ucode_ver;
#endif

	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);

	/* carrizo do enable cp interrupt after cp inited */
	if (!(adev->flags & AMD_IS_APU))
		gfx_v9_0_enable_gui_idle_interrupt(adev, true);

	udelay(50);

#ifdef AMDGPU_RLC_DEBUG_RETRY
	/* RLC_GPM_GENERAL_6 : RLC Ucode version */
	rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
	if (rlc_ucode_ver == 0x108) {
		DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
				rlc_ucode_ver, adev->gfx.rlc_fw_version);
		/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
		 * default is 0x9C4 to create a 100us interval */
		WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
		/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
		 * to disable the page fault retry interrupts, default is
		 * 0x100 (256) */
		WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
	}
#endif
}
static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
			RLCG_UCODE_LOADING_START_ADDRESS);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	return 0;
}
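/*
 * Sketch of the access pattern the load loop above relies on (inferred from
 * the code writing the address register only once): the GPM ucode window
 * auto-increments on each data write,
 *
 *   WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, start);  (set once)
 *   WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, dw0);    (addr -> start + 1)
 *   WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, dw1);    (addr -> start + 2)
 *
 * The trailing write of rlc_fw_version to the ADDR register follows the
 * convention used by the other ucode loads in this file.
 */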
static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	gfx_v9_0_rlc_stop(adev);

	/* disable CG */
	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);

	/* disable PG */
	WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);

	gfx_v9_0_rlc_reset(adev);

	gfx_v9_0_init_pg(adev);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		/* legacy rlc firmware loading */
		r = gfx_v9_0_rlc_load_microcode(adev);
		if (r)
			return r;
	}

	if (adev->asic_type == CHIP_RAVEN) {
		if (amdgpu_lbpw != 0)
			gfx_v9_0_enable_lbpw(adev, true);
		else
			gfx_v9_0_enable_lbpw(adev, false);
	}

	gfx_v9_0_rlc_start(adev);

	return 0;
}
static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);

	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
	if (!enable) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].ready = false;
	}
	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
	udelay(50);
}
static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const struct gfx_firmware_header_v1_0 *ce_hdr;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
		return -EINVAL;

	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.pfp_fw->data;
	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.ce_fw->data;
	me_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	gfx_v9_0_cp_gfx_enable(adev, false);

	/* PFP */
	fw_data = (const __le32 *)
		(adev->gfx.pfp_fw->data +
		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);

	/* CE */
	fw_data = (const __le32 *)
		(adev->gfx.ce_fw->data +
		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);

	/* ME */
	fw_data = (const __le32 *)
		(adev->gfx.me_fw->data +
		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);

	return 0;
}
static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;

	/* init the CP */
	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);

	gfx_v9_0_cp_gfx_enable(adev, true);

	r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
				       PACKET3(PACKET3_SET_CONTEXT_REG,
					       ext->reg_count));
				amdgpu_ring_write(ring,
				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	amdgpu_ring_write(ring, 0x8000);
	amdgpu_ring_write(ring, 0x8000);

	amdgpu_ring_commit(ring);

	return 0;
}
static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;

	/* Set the write pointer delay */
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);

	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's write pointers */
	ring->wptr = 0;
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);

	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
	}
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);

	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
			DOORBELL_RANGE_LOWER, ring->doorbell_index);
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
		       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);

	/* start the ring */
	gfx_v9_0_cp_gfx_start(adev);
	ring->ready = true;

	return 0;
}
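/*
 * Worked example for the RB_BUFSZ encoding above, assuming a 64KB ring
 * (a typical size, not one mandated here): the field holds log2 of the
 * ring size in qwords, so
 *
 *   rb_bufsz = order_base_2(65536 / 8) = 13,  RB_BLKSZ = 13 - 2 = 11
 *
 * and the BASE registers take the GPU address >> 8, which implies the ring
 * buffer must be at least 256-byte aligned.
 */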
static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
	} else {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			adev->gfx.compute_ring[i].ready = false;
		adev->gfx.kiq.ring.ready = false;
	}
	udelay(50);
}
static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i;
	u32 tmp;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v9_0_cp_compute_enable(adev, false);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);

	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

	/* MEC1 */
	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
			 mec_hdr->jt_offset);
	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
			le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
			adev->gfx.mec_fw_version);
	/* Todo : Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */

	return 0;
}
static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}
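/*
 * Bit layout assumed by the scheduler write above (inferred from the
 * shifts, not from a published register spec): the low byte of
 * RLC_CP_SCHEDULERS selects the KIQ as [7:5] = me, [4:3] = pipe,
 * [2:0] = queue, e.g. ME2 pipe0 queue0 gives (2 << 5) = 0x40, and the
 * second write with bit 7 set (tmp |= 0x80) activates the selection.
 */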
static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
{
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint32_t scratch, tmp = 0;
	uint64_t queue_mask = 0;
	int r, i;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << i);
	}

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("Failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}

	/* set resources */
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
		uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);

		/* map queues */
		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
		/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
				  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
				  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
				  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
				  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
				  PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
				  PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
				  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
		amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
		amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
		amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
	}
	/* write to scratch for completion */
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
	amdgpu_ring_commit(kiq_ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i >= adev->usec_timeout) {
		DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);

	return r;
}
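/*
 * Dword budget behind the amdgpu_ring_alloc() above: each MAP_QUEUES entry
 * emitted in the loop is 7 dwords (1 header + 6 payload) per compute ring,
 * and the fixed part is 11 dwords (8 for SET_RESOURCES, 3 for the scratch
 * SET_UCONFIG_REG fence), hence (7 * num_compute_rings) + 11. Any packet
 * added to this submission must be reflected in that sum.
 */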
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000003;

	eop_base_addr = ring->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));

	mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);

	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_dequeue_request = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	tmp = 0;
	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				DOORBELL_OFFSET, ring->doorbell_index);

		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MIN_IB_AVAIL_SIZE */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	mqd->cp_hqd_ib_control = tmp;

	/* activate the queue */
	mqd->cp_hqd_active = 1;

	return 0;
}
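/*
 * Example of the EOP_SIZE encoding set up above: the field holds
 * log2(size in dwords) - 1, i.e. the hardware decodes 2^(EOP_SIZE+1)
 * dwords. With GFX9_MEC_HPD_SIZE = 2048 bytes = 512 dwords:
 *
 *   EOP_SIZE = order_base_2(2048 / 4) - 1 = 9 - 1 = 8
 *
 * and 2^(8+1) = 512 dwords matches the allocation exactly.
 */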
static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int j;

	/* disable wptr polling */
	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);

	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
	       mqd->cp_hqd_eop_base_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
	       mqd->cp_hqd_eop_base_addr_hi);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
	       mqd->cp_hqd_eop_control);

	/* enable doorbell? */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
	       mqd->cp_hqd_pq_doorbell_control);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
		       mqd->cp_hqd_dequeue_request);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
		       mqd->cp_hqd_pq_rptr);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
		       mqd->cp_hqd_pq_wptr_lo);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
		       mqd->cp_hqd_pq_wptr_hi);
	}

	/* set the pointer to the MQD */
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
	       mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
	       mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
	WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
	       mqd->cp_mqd_control);

	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
	       mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
	       mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
	       mqd->cp_hqd_pq_control);

	/* set the wb address whether it's enabled or not */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
				mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
				mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
	       mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
					(AMDGPU_DOORBELL64_KIQ * 2) << 2);
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
					(AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
	}

	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
	       mqd->cp_hqd_pq_doorbell_control);

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
	       mqd->cp_hqd_pq_wptr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
	       mqd->cp_hqd_pq_wptr_hi);

	/* set the vmid for the queue */
	WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);

	WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
	       mqd->cp_hqd_persistent_state);

	/* activate the queue */
	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
	       mqd->cp_hqd_active);

	if (ring->use_doorbell)
		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);

	return 0;
}
static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;

	gfx_v9_0_kiq_setting(ring);

	if (adev->gfx.in_reset) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);

		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v9_0_kiq_init_register(ring);
		soc15_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v9_0_mqd_init(ring);
		gfx_v9_0_kiq_init_register(ring);
		soc15_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	}

	return 0;
}
static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];

	if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v9_0_mqd_init(ring);
		soc15_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	} else if (adev->gfx.in_reset) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
	} else {
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}
static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int r = 0, i;

	gfx_v9_0_cp_compute_enable(adev, true);

	ring = &adev->gfx.kiq.ring;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		goto done;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (!r) {
		r = gfx_v9_0_kiq_init_queue(ring);
		amdgpu_bo_kunmap(ring->mqd_obj);
		ring->mqd_ptr = NULL;
	}
	amdgpu_bo_unreserve(ring->mqd_obj);
	if (r)
		goto done;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v9_0_kcq_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	r = gfx_v9_0_kiq_kcq_enable(adev);
done:
	return r;
}
static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	if (!(adev->flags & AMD_IS_APU))
		gfx_v9_0_enable_gui_idle_interrupt(adev, false);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		/* legacy firmware loading */
		r = gfx_v9_0_cp_gfx_load_microcode(adev);
		if (r)
			return r;

		r = gfx_v9_0_cp_compute_load_microcode(adev);
		if (r)
			return r;
	}

	r = gfx_v9_0_cp_gfx_resume(adev);
	if (r)
		return r;

	r = gfx_v9_0_kiq_resume(adev);
	if (r)
		return r;

	ring = &adev->gfx.gfx_ring[0];
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	ring = &adev->gfx.kiq.ring;
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r)
		ring->ready = false;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		ring->ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r)
			ring->ready = false;
	}

	gfx_v9_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}
static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v9_0_cp_gfx_enable(adev, enable);
	gfx_v9_0_cp_compute_enable(adev, enable);
}

static int gfx_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfx_v9_0_init_golden_registers(adev);

	gfx_v9_0_gpu_init(adev);

	r = gfx_v9_0_rlc_resume(adev);
	if (r)
		return r;

	r = gfx_v9_0_cp_resume(adev);
	if (r)
		return r;

	r = gfx_v9_0_ngg_en(adev);

	return r;
}

static int gfx_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
	if (amdgpu_sriov_vf(adev)) {
		pr_debug("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}
	gfx_v9_0_cp_enable(adev, false);
	gfx_v9_0_rlc_stop(adev);

	return 0;
}

static int gfx_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.in_suspend = true;
	return gfx_v9_0_hw_fini(adev);
}

static int gfx_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = gfx_v9_0_hw_init(adev);
	adev->gfx.in_suspend = false;
	return r;
}

static bool gfx_v9_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
				GRBM_STATUS, GUI_ACTIVE))
		return false;
	else
		return true;
}

static int gfx_v9_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read GRBM_STATUS */
		tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
			GRBM_STATUS__GUI_ACTIVE_MASK;

		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
static int gfx_v9_0_soft_reset(void *handle)
{
	u32 grbm_soft_reset = 0;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* GRBM_STATUS */
	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
	}

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
	}

	/* GRBM_STATUS2 */
	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);


	if (grbm_soft_reset) {
		/* stop the rlc */
		gfx_v9_0_rlc_stop(adev);

		/* Disable GFX parsing/prefetching */
		gfx_v9_0_cp_gfx_enable(adev, false);

		/* Disable MEC parsing/prefetching */
		gfx_v9_0_cp_compute_enable(adev, false);

		if (grbm_soft_reset) {
			tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
			tmp |= grbm_soft_reset;
			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
			WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
			tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

			udelay(50);

			tmp &= ~grbm_soft_reset;
			WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
			tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
		}

		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	return clock;
}
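/*
 * The read sequence above assumes the capture register latches both halves
 * of the 64-bit counter atomically: writing 1 to
 * RLC_CAPTURE_GPU_CLOCK_COUNT snapshots the value, after which the LSB and
 * MSB reads cannot tear. gpu_clock_mutex serializes captures so one caller
 * cannot clobber another's snapshot between its two reads.
 */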
static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					  uint32_t vmid,
					  uint32_t gds_base, uint32_t gds_size,
					  uint32_t gws_base, uint32_t gws_size,
					  uint32_t oa_base, uint32_t oa_size)
{
	gds_base = gds_base >> AMDGPU_GDS_SHIFT;
	gds_size = gds_size >> AMDGPU_GDS_SHIFT;

	gws_base = gws_base >> AMDGPU_GWS_SHIFT;
	gws_size = gws_size >> AMDGPU_GWS_SHIFT;

	oa_base = oa_base >> AMDGPU_OA_SHIFT;
	oa_size = oa_size >> AMDGPU_OA_SHIFT;

	/* GDS Base */
	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[vmid].mem_base,
				   gds_base);

	/* GDS Size */
	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[vmid].mem_size,
				   gds_size);

	/* GWS */
	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[vmid].gws,
				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[vmid].oa,
				   (1 << (oa_size + oa_base)) - (1 << oa_base));
}
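/*
 * Worked example for the OA mask arithmetic above (values illustrative):
 * with oa_base = 2 and oa_size = 3,
 *
 *   (1 << (oa_size + oa_base)) - (1 << oa_base)
 *     = (1 << 5) - (1 << 2) = 0x20 - 0x04 = 0x1c
 *
 * i.e. a run of oa_size contiguous bits starting at bit oa_base, assigning
 * that range of ordered-append units to the given VMID.
 */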
static int gfx_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
	gfx_v9_0_set_ring_funcs(adev);
	gfx_v9_0_set_irq_funcs(adev);
	gfx_v9_0_set_gds_init(adev);
	gfx_v9_0_set_rlc_funcs(adev);

	return 0;
}

static int gfx_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	return 0;
}
static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
	uint32_t rlc_setting, data;
	unsigned i;

	if (adev->gfx.rlc.in_safe_mode)
		return;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		data = RLC_SAFE_MODE__CMD_MASK;
		data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);

		/* wait for RLC_SAFE_MODE */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
				break;
			udelay(1);
		}
		adev->gfx.rlc.in_safe_mode = true;
	}
}

static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
{
	uint32_t rlc_setting, data;

	if (!adev->gfx.rlc.in_safe_mode)
		return;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
		/*
		 * Try to exit safe mode only if it is already in safe
		 * mode.
		 */
		data = RLC_SAFE_MODE__CMD_MASK;
		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
		adev->gfx.rlc.in_safe_mode = false;
	}
}
static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	/* TODO: double check if we need to perform under safe mode */
	/* gfx_v9_0_enter_rlc_safe_mode(adev); */

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
			gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
	} else {
		gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
		gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
	}

	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
}

static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	/* TODO: double check if we need to perform under safe mode */
	/* gfx_v9_0_enter_rlc_safe_mode(adev); */

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
	else
		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
	else
		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);

	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
}
static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data, def;

	/* It is disabled by HW by default */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		/* only for Vega10 & Raven1 */
		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* MGLS is a global flag to control all MGLS in GFX */
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			/* 2 - RLC memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
				def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
			}
			/* 3 - CP memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
				def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
			}
		}
	} else {
		/* 1 - MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 2 - disable MGLS in RLC */
		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
		}

		/* 3 - disable MGLS in CP */
		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
		}
	}
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	uint32_t data, def;

	adev->gfx.rlc.funcs->enter_safe_mode(adev);

	/* Enable 3D CGCG/CGLS */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		/* write cmd to clear cgcg/cgls ov */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
		/* enable 3Dcgcg FSM(0x0020003f) */
		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
		data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
			RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
	} else {
		/* Disable CGCG/CGLS */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
		/* disable cgcg, cgls should be disabled */
		data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
			  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
	}

	adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	adev->gfx.rlc.funcs->enter_safe_mode(adev);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		else
			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* enable cgcg FSM(0x0020003F) */
		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
		data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
			RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
	} else {
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
		/* reset CGCG/CGLS bits */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
	}

	adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	if (enable) {
		/* CGCG/CGLS should be enabled after MGCG/MGLS
		 * ===  MGCG + MGLS ===
		 */
		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
		/* ===  CGCG /CGLS for GFX 3D Only === */
		gfx_v9_0_update_3d_clock_gating(adev, enable);
		/* ===  CGCG + CGLS === */
		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
	} else {
		/* CGCG/CGLS should be disabled before MGCG/MGLS
		 * ===  CGCG + CGLS ===
		 */
		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
		/* ===  CGCG /CGLS for GFX 3D Only === */
		gfx_v9_0_update_3d_clock_gating(adev, enable);
		/* ===  MGCG + MGLS === */
		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
	}
	return 0;
}

static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
	.enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
	.exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
};
static int gfx_v9_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE) ? true : false;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
		} else {
			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
			gfx_v9_0_enable_cp_power_gating(adev, true);
		else
			gfx_v9_0_enable_cp_power_gating(adev, false);

		/* update gfx cgpg state */
		gfx_v9_0_update_gfx_cg_power_gating(adev, enable);

		/* update mgcg state */
		gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v9_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		gfx_v9_0_update_gfx_clock_gating(adev,
						 state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}
static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_GFX_CGCG */
	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_RLC_LS */
	data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_CP_LS */
	data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;

	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
}
static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr*/
}

static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
	} else {
		wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
		wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
	}

	return wptr;
}

static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
	}
}
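/*
 * Two update paths are in play above. With a doorbell, the new wptr is
 * mirrored into the writeback slot and the 64-bit doorbell write both
 * carries the value and kicks the CP, so no MMIO register access is
 * needed. Without one, the driver falls back to the CP_RB0_WPTR/_HI pair;
 * the non-atomic split write is tolerable here because only the driver
 * advances the gfx ring wptr.
 */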
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask, reg_mem_engine;
	struct nbio_hdp_flush_reg *nbio_hf_reg;

	if (ring->adev->asic_type == CHIP_VEGA10)
		nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
		reg_mem_engine = 1; /* pfp */
	}

	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
			      nbio_hf_reg->hdp_flush_req_offset,
			      nbio_hf_reg->hdp_flush_done_offset,
			      ref_and_mask, ref_and_mask, 0x20);
}
static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	gfx_v9_0_write_data_to_reg(ring, 0, true,
				   SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 1);
}

static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib,
				      unsigned vm_id, bool ctx_switch)
{
	u32 header, control = 0;

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vm_id << 24);

	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);

		if (!(ib->flags & AMDGPU_IB_FLAG_CE))
			gfx_v9_0_ring_emit_de_meta(ring);
	}

	amdgpu_ring_write(ring, header);
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
		(2 << 0) |
#endif
		lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_ib *ib,
					  unsigned vm_id, bool ctx_switch)
{
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
		(2 << 0) |
#endif
		lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				     u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EOP_TC_MD_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));

	/*
	 * the address should be Qword aligned if 64bit write, Dword
	 * aligned if only send 32bit data low (discard data high)
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, 0);
}

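/*
 * Illustrative sketch (not part of the driver, kept compiled out):
 * RELEASE_MEM's DATA_SEL field selects a 64-bit (2) or 32-bit (1) fence
 * write, which is why the alignment check above is 8 bytes for 64-bit
 * fences and 4 bytes for 32-bit ones. A standalone version of the same
 * alignment test:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void check_fence_addr(uint64_t addr, int write64bit)
{
	if (write64bit)
		assert((addr & 0x7) == 0); /* Qword aligned */
	else
		assert((addr & 0x3) == 0); /* Dword aligned */
}

int main(void)
{
	check_fence_addr(0x1000, 1); /* ok: 8-byte aligned */
	check_fence_addr(0x1004, 0); /* ok: 4-byte aligned */
	return 0;
}
#endif
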
static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
			      lower_32_bits(addr), upper_32_bits(addr),
			      seq, 0xffffffff, 4);
}

static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
	unsigned eng = ring->vm_inv_eng;

	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
	pd_addr |= AMDGPU_PTE_VALID;

	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
				   hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
				   lower_32_bits(pd_addr));

	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
				   hub->ctx0_ptb_addr_hi32 + (2 * vm_id),
				   upper_32_bits(pd_addr));

	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
				   hub->vm_inv_eng0_req + eng, req);

	/* wait for the invalidate to complete */
	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
			      eng, 0, 1 << vm_id, 1 << vm_id, 0x20);

	/* compute doesn't have PFP */
	if (usepfp) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}

static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
}

static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
	else
		BUG();
	return wptr;
}

static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG(); /* only DOORBELL method supported on gfx9 now */
	}
}

static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned int flags)
{
	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}

static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
	static struct v9_ce_ib_state ce_payload = {0};
	uint64_t csa_addr;
	int cnt;

	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
	csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
}

static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	static struct v9_de_ib_state de_payload = {0};
	uint64_t csa_addr, gds_addr;
	int cnt;

	csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
	gds_addr = csa_addr + 4096;
	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}

static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v9_0_ring_emit_ce_meta(ring);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time preamble presented
		 * although there is no context switch happening.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
	return ret;
}

static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr & ring->buf_mask) - 1;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}

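/*
 * Illustrative sketch (not part of the driver, kept compiled out): the
 * COND_EXEC patch above stores the number of dwords to skip. When the
 * current wptr has wrapped past the patch slot, the distance must go
 * "around" the ring, hence the (ring_size >> 2) - offset + cur form.
 * Standalone example with a hypothetical 16-dword ring:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned ring_dws = 16, buf_mask = ring_dws - 1;
	unsigned offset = 14;			/* patch slot near the end */
	unsigned wptr = 19;			/* writes continued past the wrap */
	unsigned cur = (wptr & buf_mask) - 1;	/* == 2, before the patch slot */
	unsigned skip = (cur > offset) ? cur - offset
				       : ring_dws - offset + cur;

	printf("skip %u dwords\n", skip);	/* 16 - 14 + 2 = 4 */
	return 0;
}
#endif
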
static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
}

static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
}

static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       TIME_STAMP_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_REG_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead
			 * of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}

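/*
 * Illustrative sketch (not part of the driver, kept compiled out): how the
 * IH ring_id byte decodes into me/pipe/queue in gfx_v9_0_eop_irq() above.
 * Standalone example with a hypothetical interrupt-vector value:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned ring_id = 0x4d;		/* hypothetical IV entry */
	unsigned me = (ring_id & 0x0c) >> 2;	/* bits [3:2] -> 3 */
	unsigned pipe = (ring_id & 0x03) >> 0;	/* bits [1:0] -> 1 */
	unsigned queue = (ring_id & 0x70) >> 4;	/* bits [6:4] -> 4 */

	printf("me %u pipe %u queue %u\n", me, pipe, queue);
	return 0;
}
#endif
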
static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned int type,
					    enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	if (ring->me == 1)
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
	else
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32(target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32(target, tmp);
		}
		break;
	default:
		BUG(); /* kiq only support GENERIC2_INT now */
		break;
	}
	return 0;
}

static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;
	DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
		  me_id, pipe_id, queue_id);

	amdgpu_fence_process(ring);
	return 0;
}

const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
	.late_init = gfx_v9_0_late_init,
	.sw_init = gfx_v9_0_sw_init,
	.sw_fini = gfx_v9_0_sw_fini,
	.hw_init = gfx_v9_0_hw_init,
	.hw_fini = gfx_v9_0_hw_fini,
	.suspend = gfx_v9_0_suspend,
	.resume = gfx_v9_0_resume,
	.is_idle = gfx_v9_0_is_idle,
	.wait_for_idle = gfx_v9_0_wait_for_idle,
	.soft_reset = gfx_v9_0_soft_reset,
	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
	.set_powergating_state = gfx_v9_0_set_powergating_state,
	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 + /* COND_EXEC */
		7 + /* PIPELINE_SYNC */
		24 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		     * the first COND_EXEC jump to the place just
		     * prior to this double SWITCH_BUFFER
		     */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2, /* SWITCH_BUFFER */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v9_ring_emit_sb,
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
	.emit_tmz = gfx_v9_0_ring_emit_tmz,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		24 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		24 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_0_ring_emit_rreg,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
};

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
	.set = gfx_v9_0_kiq_set_interrupt_state,
	.process = gfx_v9_0_kiq_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
	.set = gfx_v9_0_set_eop_interrupt_state,
	.process = gfx_v9_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
	.set = gfx_v9_0_set_priv_reg_fault_state,
	.process = gfx_v9_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
	.set = gfx_v9_0_set_priv_inst_fault_state,
	.process = gfx_v9_0_priv_inst_irq,
};

static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;

	adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
	adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
}

static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}

static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}

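/*
 * Illustrative sketch (not part of the driver, kept compiled out): the
 * register pair above holds a bitmap of *inactive* CUs, so the active set
 * is its complement, clipped to the number of CUs actually present in the
 * shader array. Standalone example assuming a hypothetical 10-CU array:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned max_cu_per_sh = 10;
	unsigned mask = (1u << max_cu_per_sh) - 1; /* like amdgpu_gfx_create_bitmask() */
	unsigned inactive = 0x023;		   /* CUs 0, 1 and 5 fused off */
	unsigned active = ~inactive & mask;

	printf("active CU bitmap: 0x%03x\n", active); /* 0x3dc */
	return 0;
}
#endif
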
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v9_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,
};