/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
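
/*
 * The golden register tables below are flat arrays of
 * { register offset, mask, value } triples; they are consumed three
 * entries at a time by amdgpu_device_program_register_sequence() in
 * gmc_v8_0_init_golden_registers() to apply per-ASIC register tweaks.
 */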
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_stoney_common,
							ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}
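
/*
 * gmc_v8_0_mc_stop()/gmc_v8_0_mc_resume() bracket sequences that
 * reprogram the memory controller (see the soft reset handlers below):
 * CPU access to the framebuffer is blocked via BIF_FB_EN and the MC is
 * put into blackout mode so outstanding requests drain before any MC
 * registers are modified.
 */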
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
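	/* The remaining VI parts have no driver-loaded MC firmware image;
	 * their memory controller is brought up without driver-side ucode.
	 */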
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGAM:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards:
	 * on the host the vbios does this for us in asic_init,
	 * and on a VF the hypervisor loads it for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

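	/*
	 * The io_debug section of the MC firmware image is a list of
	 * (MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA) dword pairs,
	 * which is why regs_size above divides by 4 * 2 bytes per entry.
	 */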
	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data, vbios_version;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards:
	 * on the host the vbios does this for us in asic_init,
	 * and on a VF the hypervisor loads it for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

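	/* Probe the MC firmware revision the vbios left behind via the
	 * MC_SEQ_IO_DEBUG index/data pair; a version of 0 below is taken
	 * to mean there is nothing for the driver to (re)load.
	 */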
	WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
	data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
	vbios_version = data & 0xf;

	if (vbios_version == 0)
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

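/*
 * MC_VM_FB_LOCATION stores the framebuffer base in units of 16MB
 * (i.e. the physical address shifted right by 24), which is why the
 * 16-bit field read below is shifted left by 24 to recover the base.
 */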
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
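	/* The VGA aperture and render controls only exist on parts with a
	 * display block; skip them when no CRTCs are present.
	 */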
	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
			chansize = 64;
		} else {
			chansize = 32;
		}
		tmp = RREG32(mmMC_SHARED_CHMAP);
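		/* NOOFCHAN is an encoding, not a raw channel count; decode
		 * it to the actual number of memory channels. vram_width
		 * below ends up as channels * channel size in bits.
		 */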
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* size in MB on vi */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		case CHIP_VEGAM:     /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
				   uint32_t vmid)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

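/*
 * The per-VM page table base addresses live in two register banks:
 * contexts 0-7 at mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid and
 * contexts 8-15 at mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8),
 * hence the vmid < 8 split below (and again in gart_enable).
 */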
static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

/**
 * gmc_v8_0_set_pte_pde - update the page tables using the CPU
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
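	/*
	 * For example, a valid, snooped system page that is readable and
	 * writable would carry
	 * AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
	 * AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE in @flags.
	 */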
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	int r, i;
	u32 tmp, field;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
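	/* PAGE_TABLE_BLOCK_SIZE is programmed relative to the minimum
	 * 512-page (2MB) block, i.e. as log2(pages per block) - 9.
	 */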
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	adev->gart.ready = true;
	return 0;
}

930 | ||
931 | static int gmc_v8_0_gart_init(struct amdgpu_device *adev) | |
932 | { | |
933 | int r; | |
934 | ||
1123b989 | 935 | if (adev->gart.bo) { |
aaa36a97 AD |
936 | WARN(1, "R600 PCIE GART already initialized\n"); |
937 | return 0; | |
938 | } | |
939 | /* Initialize common gart structure */ | |
940 | r = amdgpu_gart_init(adev); | |
941 | if (r) | |
942 | return r; | |
943 | adev->gart.table_size = adev->gart.num_gpu_pages * 8; | |
4b98e0c4 | 944 | adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE; |
aaa36a97 AD |
945 | return amdgpu_gart_table_vram_alloc(adev); |
946 | } | |
947 | ||
948 | /** | |
949 | * gmc_v8_0_gart_disable - gart disable | |
950 | * | |
951 | * @adev: amdgpu_device pointer | |
952 | * | |
953 | * This disables all VM page table (CIK). | |
954 | */ | |
955 | static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) | |
956 | { | |
957 | u32 tmp; | |
958 | ||
959 | /* Disable all tables */ | |
960 | WREG32(mmVM_CONTEXT0_CNTL, 0); | |
961 | WREG32(mmVM_CONTEXT1_CNTL, 0); | |
962 | /* Setup TLB control */ | |
963 | tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); | |
964 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); | |
965 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0); | |
966 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0); | |
967 | WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp); | |
968 | /* Setup L2 cache */ | |
969 | tmp = RREG32(mmVM_L2_CNTL); | |
970 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); | |
971 | WREG32(mmVM_L2_CNTL, tmp); | |
972 | WREG32(mmVM_L2_CNTL2, 0); | |
ce1b1b66 | 973 | amdgpu_gart_table_vram_unpin(adev); |
aaa36a97 AD |
974 | } |
975 | ||
/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: process address space id of the faulting process
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	/* the MCCLIENT register holds a four character ASCII client tag */
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, pasid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gmc_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_late_init(adev);

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

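/*
 * Size of the framebuffer the vbios/pre-OS console is scanning out of;
 * this region is carved out as "stolen" memory (see gmc.stolen_size in
 * sw_init below) so it is not reused while it may still be displayed.
 */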
static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);

		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;
	return size;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if ((adev->asic_type == CHIP_FIJI) ||
		    (adev->asic_type == CHIP_VEGAM))
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 64GB ((16 << 20) 4k pages).
	 * Max GPUVM address space size on VI is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("amdgpu: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		pr_warn("amdgpu: No coherent DMA available\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					  GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

1202 | ||
5fc3aeeb | 1203 | static int gmc_v8_0_hw_init(void *handle) |
aaa36a97 AD |
1204 | { |
1205 | int r; | |
5fc3aeeb | 1206 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
aaa36a97 AD |
1207 | |
1208 | gmc_v8_0_init_golden_registers(adev); | |
1209 | ||
1210 | gmc_v8_0_mc_program(adev); | |
1211 | ||
8878d854 | 1212 | if (adev->asic_type == CHIP_TONGA) { |
0d52c6a1 RZ |
1213 | r = gmc_v8_0_tonga_mc_load_microcode(adev); |
1214 | if (r) { | |
1215 | DRM_ERROR("Failed to load MC firmware!\n"); | |
1216 | return r; | |
1217 | } | |
1218 | } else if (adev->asic_type == CHIP_POLARIS11 || | |
1219 | adev->asic_type == CHIP_POLARIS10 || | |
1220 | adev->asic_type == CHIP_POLARIS12) { | |
1221 | r = gmc_v8_0_polaris_mc_load_microcode(adev); | |
aaa36a97 AD |
1222 | if (r) { |
1223 | DRM_ERROR("Failed to load MC firmware!\n"); | |
1224 | return r; | |
1225 | } | |
1226 | } | |
1227 | ||
1228 | r = gmc_v8_0_gart_enable(adev); | |
1229 | if (r) | |
1230 | return r; | |
1231 | ||
1232 | return r; | |
1233 | } | |
1234 | ||
5fc3aeeb | 1235 | static int gmc_v8_0_hw_fini(void *handle) |
aaa36a97 | 1236 | { |
5fc3aeeb | 1237 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1238 | ||
770d13b1 | 1239 | amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); |
aaa36a97 AD |
1240 | gmc_v8_0_gart_disable(adev); |
1241 | ||
1242 | return 0; | |
1243 | } | |
1244 | ||
5fc3aeeb | 1245 | static int gmc_v8_0_suspend(void *handle) |
aaa36a97 | 1246 | { |
5fc3aeeb | 1247 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
aaa36a97 | 1248 | |
aaa36a97 AD |
1249 | gmc_v8_0_hw_fini(adev); |
1250 | ||
1251 | return 0; | |
1252 | } | |
1253 | ||
5fc3aeeb | 1254 | static int gmc_v8_0_resume(void *handle) |
aaa36a97 AD |
1255 | { |
1256 | int r; | |
5fc3aeeb | 1257 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
aaa36a97 AD |
1258 | |
1259 | r = gmc_v8_0_hw_init(adev); | |
1260 | if (r) | |
1261 | return r; | |
1262 | ||
620f774f | 1263 | amdgpu_vmid_reset_all(adev); |
aaa36a97 | 1264 | |
b3c85a0f | 1265 | return 0; |
aaa36a97 AD |
1266 | } |
1267 | ||
static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

1301 | ||
da146d3b | 1302 | static bool gmc_v8_0_check_soft_reset(void *handle) |
aaa36a97 | 1303 | { |
aaa36a97 | 1304 | u32 srbm_soft_reset = 0; |
5fc3aeeb | 1305 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
aaa36a97 AD |
1306 | u32 tmp = RREG32(mmSRBM_STATUS); |
1307 | ||
1308 | if (tmp & SRBM_STATUS__VMC_BUSY_MASK) | |
1309 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, | |
1310 | SRBM_SOFT_RESET, SOFT_RESET_VMC, 1); | |
1311 | ||
1312 | if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | | |
1313 | SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) { | |
2f7d10b3 | 1314 | if (!(adev->flags & AMD_IS_APU)) |
aaa36a97 AD |
1315 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, |
1316 | SRBM_SOFT_RESET, SOFT_RESET_MC, 1); | |
1317 | } | |
aaa36a97 | 1318 | if (srbm_soft_reset) { |
770d13b1 | 1319 | adev->gmc.srbm_soft_reset = srbm_soft_reset; |
da146d3b | 1320 | return true; |
50b0197a | 1321 | } else { |
770d13b1 | 1322 | adev->gmc.srbm_soft_reset = 0; |
da146d3b | 1323 | return false; |
50b0197a | 1324 | } |
50b0197a | 1325 | } |
aaa36a97 | 1326 | |
50b0197a CZ |
1327 | static int gmc_v8_0_pre_soft_reset(void *handle) |
1328 | { | |
1329 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
1330 | ||
770d13b1 | 1331 | if (!adev->gmc.srbm_soft_reset) |
50b0197a CZ |
1332 | return 0; |
1333 | ||
e4f6b39e | 1334 | gmc_v8_0_mc_stop(adev); |
50b0197a CZ |
1335 | if (gmc_v8_0_wait_for_idle(adev)) { |
1336 | dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); | |
1337 | } | |
1338 | ||
1339 | return 0; | |
1340 | } | |
aaa36a97 | 1341 | |
static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->gmc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->gmc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

50b0197a CZ |
1373 | static int gmc_v8_0_post_soft_reset(void *handle) |
1374 | { | |
1375 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
1376 | ||
770d13b1 | 1377 | if (!adev->gmc.srbm_soft_reset) |
50b0197a CZ |
1378 | return 0; |
1379 | ||
e4f6b39e | 1380 | gmc_v8_0_mc_resume(adev); |
50b0197a CZ |
1381 | return 0; |
1382 | } | |
1383 | ||
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

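/**
 * gmc_v8_0_process_interrupt - handle a VM protection fault
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: decoded interrupt vector entry
 *
 * Reads and clears the fault address/status registers, optionally switches
 * the VM into stop-on-fault mode, prints a rate-limited decode of the fault
 * (including the offending process) and, for KFD-owned VMIDs, forwards the
 * fault details to the KFD fault-info buffer.
 */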
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info = { 0 };

		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
			entry->src_id, entry->src_data[0], task_info.process_name,
			task_info.tgid, task_info.task_name, task_info.pid);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
			addr);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
	    && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

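/*
 * The two Fiji clockgating helpers below walk the same list of MC hub, ATC
 * and VM L2 registers and set or clear one per-register mask in each.  A
 * hypothetical table-driven refactor (a sketch only, not upstream code)
 * could share the loop:
 *
 *	static const struct { u32 reg; u32 cg_mask; u32 ls_mask; } fiji_mc_cg[] = {
 *		{ mmMC_HUB_MISC_HUB_CG, MC_HUB_MISC_HUB_CG__ENABLE_MASK,
 *		  MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK },
 *		{ mmATC_MISC_CG, ATC_MISC_CG__ENABLE_MASK,
 *		  ATC_MISC_CG__MEM_LS_ENABLE_MASK },
 *		... the remaining seven registers exactly as in the code below
 *	};
 *
 *	static void fiji_update_mc_cg(struct amdgpu_device *adev, bool ls,
 *				      bool enable)
 *	{
 *		u32 data, mask;
 *		int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(fiji_mc_cg); i++) {
 *			mask = ls ? fiji_mc_cg[i].ls_mask : fiji_mc_cg[i].cg_mask;
 *			data = RREG32(fiji_mc_cg[i].reg);
 *			if (enable)
 *				data |= mask;
 *			else
 *				data &= ~mask;
 *			WREG32(fiji_mc_cg[i].reg, data);
 *		}
 *	}
 *
 * The open-coded versions below keep every register/mask pair explicit
 * instead.
 */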
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

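/*
 * Same register walk as above, but toggling the MEM_LS_ENABLE bits, which
 * (as the name suggests) allow the block's memories to drop into light
 * sleep when idle rather than gating the clocks themselves.
 */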
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

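/*
 * Only Fiji has the MC medium-grain clockgating and light-sleep paths wired
 * up here; every other VI part falls through the switch untouched.  Under
 * SR-IOV the hook returns early, presumably because the host owns
 * clockgating on virtualized functions.
 */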
static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

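/* GMC v8 exposes no powergating controls, so this hook is a no-op. */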
static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

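/*
 * Report which CG features are currently live by reading the HUB CG
 * register back; both the MGCG and LS enables happen to be visible in this
 * one register.  Under SR-IOV the flags are simply cleared first.
 */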
static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

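/*
 * The amd_ip_funcs table ties the lifecycle, soft-reset and clockgating
 * hooks above into the amdgpu IP-block framework; it is exported through
 * the three ip_block version structs at the bottom of the file.
 */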
static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

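/*
 * Backend callbacks the generic amdgpu GMC code dispatches through
 * adev->gmc.gmc_funcs: TLB flushing (direct and via ring emission), PASID
 * mapping, PTE/PDE encoding and PRT setup.
 */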
static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
	.set_pte_pde = gmc_v8_0_set_pte_pde,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v8_0_get_vm_pde
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

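/*
 * gmc_v8_0_set_gmc_funcs() only installs the table when nothing set one
 * earlier; gmc_v8_0_set_irq_funcs() registers the single VM-fault interrupt
 * source whose enable/process callbacks are the two functions above.
 */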
static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}
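
/*
 * Three GMC revisions share this implementation; only the advertised
 * major.minor pair differs, which lets the amdgpu core match the block
 * against the GMC revision each VI ASIC registers.
 */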
const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};