/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");

static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

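/**
 * gmc_v8_0_init_golden_registers - program golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Program the per-ASIC golden register sequences (clock gating init
 * and memory hub tuning values) for the detected chip.
 */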
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_stoney_common,
							ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

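/**
 * gmc_v8_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC to go idle, then blackout the MC and block CPU
 * access to the framebuffer so the MC can be reprogrammed safely.
 */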
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

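/**
 * gmc_v8_0_mc_resume - restart the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Take the MC out of blackout mode and re-enable CPU read and
 * write access to the framebuffer.
 */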
static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because hypervisor will do that
	 * for this adaptor.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

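/**
 * gmc_v8_0_polaris_mc_load_microcode - load polaris MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI). The load is skipped when
 * the vbios version field read back from the MC sequencer is 0.
 * Returns 0 on success, error on failure.
 */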
static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data, vbios_version;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because hypervisor will do that
	 * for this adaptor.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
	data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
	vbios_version = data & 0xf;

	if (vbios_version == 0)
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

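/**
 * gmc_v8_0_vram_gtt_location - place vram and gart in the GPU address space
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller info
 *
 * Read the FB base from the hardware (0 on SR-IOV VFs) and lay out
 * the vram and gart apertures accordingly.
 */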
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_device_vram_location(adev, &adev->gmc, base);
	amdgpu_device_gart_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
			chansize = 64;
		else
			chansize = 32;
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* size in MB */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
				   uint32_t vmid)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

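/**
 * gmc_v8_0_emit_flush_gpu_tlb - flush the TLB from a ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vm instance to flush
 * @pasid: pasid mapped to the vmid
 * @pd_addr: address of the page directory
 *
 * Emit register writes on @ring to update the page table base for
 * @vmid, update the VMID-to-PASID LUT, and invalidate the TLB.
 * Returns the pd_addr that was programmed.
 */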
static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, unsigned pasid,
					    uint64_t pd_addr)
{
	uint32_t reg;

	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

/**
 * gmc_v8_0_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

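/**
 * gmc_v8_0_get_vm_pte_flags - translate VM page flags to PTE flags
 *
 * @adev: amdgpu_device pointer
 * @flags: AMDGPU_VM_PAGE_* mapping flags
 *
 * Convert the generic AMDGPU_VM_PAGE_* mapping flags into the
 * AMDGPU_PTE_* bits used in the hardware page table entries.
 */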
static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

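/**
 * gmc_v8_0_get_vm_pde - adjust a PDE address and flags
 *
 * @adev: amdgpu_device pointer
 * @level: page table level
 * @addr: PDE address to adjust
 * @flags: PDE flags to adjust
 *
 * No adjustment is needed on VI; just sanity-check that no bits are
 * set outside the valid PDE address range.
 */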
static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn;

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp, field;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

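/**
 * gmc_v8_0_gart_init - gart table setup
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the common gart structure and allocate the GART page
 * table in vram; one 8-byte entry is needed per GPU page.
 * Returns 0 for success, error for failure.
 */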
static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (VI).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: pasid of the faulting process
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, pasid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

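/**
 * gmc_v8_0_convert_vram_type - convert the MC_SEQ_MISC0 memory type
 *
 * @mc_seq_vram_type: memory type field from the MC_SEQ_MISC0 register
 *
 * Translate the hardware memory type encoding into the corresponding
 * AMDGPU_VRAM_TYPE_* value, or AMDGPU_VRAM_TYPE_UNKNOWN.
 */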
static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gmc_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if (adev->asic_type == CHIP_FIJI)
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Max GPUVM size is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	adev->gmc.stolen_size = 256 * 1024;

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("amdgpu: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		pr_warn("amdgpu: No coherent DMA available\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	gmc_v8_0_gart_fini(adev);
	amdgpu_bo_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
		   adev->asic_type == CHIP_POLARIS10 ||
		   adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	return gmc_v8_0_gart_enable(adev);
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->gmc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->gmc.srbm_soft_reset = 0;
		return false;
	}
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev);
	if (gmc_v8_0_wait_for_idle(adev)) {
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
	}

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->gmc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->gmc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev);
	return 0;
}

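/**
 * gmc_v8_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Set or clear the protection fault interrupt enable bits in the
 * VM_CONTEXT0/1 control registers according to @state.
 */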
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

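/**
 * gmc_v8_0_process_interrupt - handle a VM fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Read back and clear the fault address/status registers and print
 * (rate limited) human readable fault information. On SR-IOV VFs the
 * fault registers cannot be decoded, so only the raw source is logged.
 */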
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* writing bit 0 resets the latched fault addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	return 0;
}

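/**
 * fiji_update_mc_medium_grain_clock_gating - toggle MC medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 *
 * Sets or clears the clock gating enable bit in each of the MC hub, XPB,
 * ATC, CITF and VM L2 clock gating registers.  Gating is only turned on
 * if AMD_CG_SUPPORT_MC_MGCG is set in adev->cg_flags.
 */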
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

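/**
 * fiji_update_mc_light_sleep - toggle MC memory light sleep
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable light sleep
 *
 * Sets or clears the MEM_LS enable bit in the same set of registers as
 * the MGCG helper above.  Light sleep is only turned on if
 * AMD_CG_SUPPORT_MC_LS is set in adev->cg_flags.
 */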
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

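/*
 * Only Fiji gets its MC clockgating updated here; the switch below is a
 * no-op for every other ASIC, and SR-IOV VFs skip the register writes
 * entirely.
 */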
static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

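/*
 * No powergating is implemented for this GMC block; the stub below only
 * satisfies the amd_ip_funcs interface.
 */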
static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

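/*
 * Report which MC clockgating features are currently active; both the
 * MGCG and light sleep states are sampled from MC_HUB_MISC_HUB_CG.
 */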
static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

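/* Common IP block hooks invoked by the amdgpu device init/teardown code. */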
static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

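/* Address-translation and page-fault callbacks used by the core GMC/VM code. */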
static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_set_pte_pde,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v8_0_get_vm_pde
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

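/*
 * Three GMC 8.x variants are exported, but they differ only in the
 * version numbers they report; all of them share the same function table.
 */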
const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};