/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"


static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");

static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
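
/*
 * Illustrative sketch (not part of the driver): each golden-settings entry
 * above is a {register, and_mask, or_value} triple.  As far as I can tell,
 * amdgpu_device_program_register_sequence() applies each triple roughly as:
 *
 *	tmp = RREG32(reg);
 *	tmp &= ~and_mask;	(clear the masked bits)
 *	tmp |= or_value;	(program the new value)
 *	WREG32(reg, tmp);
 *
 * so the mmMC_ARB_WTM_GRPWT_RD entry, for example, clears bits 1:0 and
 * leaves them at 0.
 */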

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_stoney_common,
							ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}
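
/*
 * Sketch for illustration: REG_SET_FIELD()/REG_GET_FIELD() are mask/shift
 * helpers built from the generated __MASK/__SHIFT constants in the
 * *_sh_mask.h headers.  Roughly (my paraphrase, not the exact macro text):
 *
 *	REG_SET_FIELD(v, BIF_FB_EN, FB_READ_EN, 1)
 *		== (v & ~BIF_FB_EN__FB_READ_EN_MASK) |
 *		   ((1 << BIF_FB_EN__FB_READ_EN__SHIFT) &
 *		    BIF_FB_EN__FB_READ_EN_MASK)
 *
 * so the two calls in gmc_v8_0_mc_resume() simply build a value with both
 * the read- and write-enable bits set before writing mmBIF_FB_EN.
 */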

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}
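
/*
 * Example (illustrative): for CHIP_TONGA the snprintf() above produces
 * "amdgpu/tonga_mc.bin", which request_firmware() resolves relative to the
 * firmware search path (typically /lib/firmware), i.e. the same names
 * declared with MODULE_FIRMWARE() at the top of this file.
 */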

/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards; the vbios does it
	 * for us in asic_init in that case, and on a VF the hypervisor loads
	 * it for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}
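
/*
 * Worked example (illustrative numbers): the io_debug section of the MC
 * firmware image is a flat array of (index, data) register pairs, which is
 * why regs_size divides io_debug_size_bytes by 4 * 2 (four bytes per dword,
 * two dwords per pair).  A 64-byte io_debug section therefore yields
 * 64 / 8 = 8 index/data writes in the "load mc io regs" loop above.
 */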

static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data, vbios_version;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards; the vbios does it
	 * for us in asic_init in that case, and on a VF the hypervisor loads
	 * it for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
	data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
	vbios_version = data & 0xf;

	if (vbios_version == 0)
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_device_vram_location(adev, &adev->gmc, base);
	amdgpu_device_gart_location(adev, mc);
}
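
/*
 * Example (illustrative value): the low 16 bits of mmMC_VM_FB_LOCATION hold
 * the frame-buffer base in 16 MB units (hence the << 24 above).  A register
 * value of 0x00F0 would therefore place the VRAM aperture at
 * 0xF0 << 24 = 0xF0000000 in the GPU's physical address space.
 */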

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
			chansize = 64;
		else
			chansize = 32;

		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* size in MB */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
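
/*
 * Worked example (illustrative): vram_width is the total memory bus width,
 * numchan * chansize, so a NOOFCHAN value decoding to 8 channels with a
 * 64-bit CHANSIZE gives a 512-bit bus.  Likewise, since mmCONFIG_MEMSIZE
 * reports VRAM in MB, a reading of 4096 becomes
 * 4096 * 1024 * 1024 = 4 GiB in mc_vram_size.
 */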

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
				   uint32_t vmid)
{
	/* bits 0-15 are the VM contexts 0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
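
/*
 * Example (illustrative): mmVM_INVALIDATE_REQUEST takes a per-VMID bitmask,
 * so flushing VMID 3 writes 1 << 3 = 0x8, and flushing every context at
 * once would write 0xffff.
 */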

static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts 0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

/**
 * gmc_v8_0_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
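
/*
 * Worked example (illustrative values): mapping the physical page at
 * 0x12345000 as valid + system + readable would, per the PTE layout above,
 * write
 *
 *	value = 0x12345000		(bits 39:12, page base)
 *	      | (1 << 0)		(valid)
 *	      | (1 << 1)		(system)
 *	      | (1 << 5)		(read)
 *	      = 0x12345023
 *
 * into entry gpu_page_idx of the table (8 bytes per entry).
 */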

static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}
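
/*
 * Note for illustration: the PRT aperture LOW/HIGH registers are programmed
 * in units of GPU pages, not bytes.  When enabled, the window above spans
 * everything between the reserved VA region at the bottom of the address
 * space and its mirror at the top; when disabled, low = 0xfffffff with
 * high = 0 is an inverted (empty) range, so no address matches the
 * aperture.
 */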

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp, field;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}
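
/*
 * Worked example (illustrative): with one 8-byte PTE per 4 KiB GPU page,
 * the default 1024 MiB GART on Tonga needs
 * (1024 MiB / 4 KiB) * 8 = 262144 * 8 bytes = 2 MiB of page-table VRAM,
 * which is what table_size = num_gpu_pages * 8 computes above.
 */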

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (VI).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, pasid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}
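
/*
 * Example (hypothetical value): mc_client packs a four-character ASCII tag
 * naming the client block, most significant byte first.  A raw value of
 * 0x43423030 would decode, via the block[] initializer above, to the
 * string "CB00".
 */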

static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gmc_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}
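
/*
 * Arithmetic note (for illustration): 4ULL << 30 is 4 GiB, so the shared
 * aperture above occupies [0x2000000000000000, 0x20000000ffffffff] and the
 * private aperture the 4 GiB window immediately after it.
 */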

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if (adev->asic_type == CHIP_FIJI)
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 64GB of address space.
	 * Max GPUVM address space size on VI is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask.
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	adev->gmc.stolen_size = 256 * 1024;

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("amdgpu: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		pr_warn("amdgpu: No coherent DMA available\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
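
	/* Illustrative note: DMA_BIT_MASK(40) evaluates to (1ULL << 40) - 1 =
	 * 0xffffffffff, the same 40-bit limit used for gmc.mc_mask above, so
	 * the device is only handed bus addresses its MC can actually reach.
	 */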

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	gmc_v8_0_gart_fini(adev);
	amdgpu_bo_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
		   adev->asic_type == CHIP_POLARIS10 ||
		   adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read SRBM_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->gmc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->gmc.srbm_soft_reset = 0;
		return false;
	}
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev);
	if (gmc_v8_0_wait_for_idle(adev))
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

	return 0;
}
static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->gmc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->gmc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev);
	return 0;
}

static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	return 0;
}

static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}
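
/*
 * Note for illustration: the enable and disable paths above touch the same
 * nine clock-gating registers; gating is turned on by OR-ing in each
 * block's __ENABLE_MASK and off by AND-ing with its complement, e.g.
 *
 *	data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;	(gate)
 *	data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;	(ungate)
 *
 * fiji_update_mc_light_sleep() below follows the same pattern with the
 * __MEM_LS_ENABLE_MASK bits.
 */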

static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
	.set_pte_pde = gmc_v8_0_set_pte_pde,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v8_0_get_vm_pde
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};