/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gem.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "si_enums.h"
static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
MODULE_FIRMWARE("amdgpu/verde_mc.bin");
MODULE_FIRMWARE("amdgpu/oland_mc.bin");
MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
MODULE_FIRMWARE("amdgpu/si58_mc.bin");
#define MC_SEQ_MISC0__MT__MASK   0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
#define MC_SEQ_MISC0__MT__DDR2   0x20000000
#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
#define MC_SEQ_MISC0__MT__HBM    0x60000000
#define MC_SEQ_MISC0__MT__DDR3   0xB0000000
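
/**
 * gmc_v6_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC to go idle, then block CPU framebuffer access and put
 * the MC into blackout mode so it stays quiescent across a reset.
 */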
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
        u32 blackout;

        gmc_v6_0_wait_for_idle((void *)adev);

        blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
        if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
                /* Block CPU access */
                WREG32(mmBIF_FB_EN, 0);
                /* blackout the MC */
                blackout = REG_SET_FIELD(blackout,
                                         MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
                WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
        }
        /* wait for the MC to settle */
        udelay(100);
}
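
/**
 * gmc_v6_0_mc_resume - bring the memory controller back up
 *
 * @adev: amdgpu_device pointer
 *
 * Take the MC out of blackout mode and re-enable CPU framebuffer reads
 * and writes after a reset.
 */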
static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
{
        u32 tmp;

        /* unblackout the MC */
        tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
        tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
        WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
        /* allow CPU access */
        tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
        tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
        WREG32(mmBIF_FB_EN, tmp);
}
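
/**
 * gmc_v6_0_init_microcode - fetch the MC firmware image
 *
 * @adev: amdgpu_device pointer
 *
 * Pick the firmware file for the detected SI variant (boards with the
 * 0x58 memory configuration need the special si58 image), then request
 * and validate it.
 */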
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err;
        bool is_58_fw = false;

        switch (adev->asic_type) {
        case CHIP_TAHITI:
                chip_name = "tahiti";
                break;
        case CHIP_PITCAIRN:
                chip_name = "pitcairn";
                break;
        case CHIP_VERDE:
                chip_name = "verde";
                break;
        case CHIP_OLAND:
                chip_name = "oland";
                break;
        case CHIP_HAINAN:
                chip_name = "hainan";
                break;
        default:
                BUG();
        }

        /* this memory configuration requires special firmware */
        if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
                is_58_fw = true;

        if (is_58_fw)
                snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
        else
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
        err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
        if (err)
                goto out;

        err = amdgpu_ucode_validate(adev->gmc.fw);

out:
        if (err) {
                dev_err(adev->dev,
                        "si_mc: Failed to load firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->gmc.fw);
                adev->gmc.fw = NULL;
        }
        return err;
}
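
/**
 * gmc_v6_0_mc_load_microcode - program the MC firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Load the IO debug register settings and the MC ucode into the memory
 * controller and wait for memory training to complete.  Skipped when
 * the MC sequencer is already running.
 */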
static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
        const __le32 *new_fw_data = NULL;
        u32 running;
        const __le32 *new_io_mc_regs = NULL;
        int i, regs_size, ucode_size;
        const struct mc_firmware_header_v1_0 *hdr;

        if (!adev->gmc.fw)
                return -EINVAL;

        hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;

        amdgpu_ucode_print_mc_hdr(&hdr->header);

        adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
        regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
        new_io_mc_regs = (const __le32 *)
                (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
        ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
        new_fw_data = (const __le32 *)
                (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

        running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;

        if (running == 0) {
                /* reset the engine and set to writable */
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

                /* load mc io regs */
                for (i = 0; i < regs_size; i++) {
                        WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
                        WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
                }
                /* load the MC ucode */
                for (i = 0; i < ucode_size; i++) {
                        WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
                }

                /* put the engine back into the active state */
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

                /* wait for training to complete */
                for (i = 0; i < adev->usec_timeout; i++) {
                        if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
                                break;
                        udelay(1);
                }
                for (i = 0; i < adev->usec_timeout; i++) {
                        if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
                                break;
                        udelay(1);
                }
        }

        return 0;
}
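
/**
 * gmc_v6_0_vram_gtt_location - place VRAM and GTT in the GPU address space
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure holding the placement
 *
 * Read the physical VRAM base from the MC and let the common helpers
 * place the VRAM and GART apertures around it.
 */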
static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
                                       struct amdgpu_gmc *mc)
{
        u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;

        base <<= 24;

        amdgpu_gmc_vram_location(adev, mc, base);
        amdgpu_gmc_gart_location(adev, mc);
}
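
/**
 * gmc_v6_0_mc_program - program the memory controller apertures
 *
 * @adev: amdgpu_device pointer
 *
 * Clear the HDP registers, lock out VGA access and program the system
 * aperture and AGP registers to match the chosen VRAM layout.
 */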
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
        int i, j;

        /* Initialize HDP */
        for (i = 0, j = 0; i < 32; i++, j += 0x6) {
                WREG32((0xb05 + j), 0x00000000);
                WREG32((0xb06 + j), 0x00000000);
                WREG32((0xb07 + j), 0x00000000);
                WREG32((0xb08 + j), 0x00000000);
                WREG32((0xb09 + j), 0x00000000);
        }
        WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

        if (gmc_v6_0_wait_for_idle((void *)adev))
                dev_warn(adev->dev, "Wait for MC idle timed out!\n");

        if (adev->mode_info.num_crtc) {
                u32 tmp;

                /* Lockout access through VGA aperture */
                tmp = RREG32(mmVGA_HDP_CONTROL);
                tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
                WREG32(mmVGA_HDP_CONTROL, tmp);

                /* disable VGA render */
                tmp = RREG32(mmVGA_RENDER_CONTROL);
                tmp &= ~VGA_VSTATUS_CNTL;
                WREG32(mmVGA_RENDER_CONTROL, tmp);
        }
        /* Update configuration */
        WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
               adev->gmc.vram_start >> 12);
        WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
               adev->gmc.vram_end >> 12);
        WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
               adev->vram_scratch.gpu_addr >> 12);
        WREG32(mmMC_VM_AGP_BASE, 0);
        WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
        WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);

        if (gmc_v6_0_wait_for_idle((void *)adev))
                dev_warn(adev->dev, "Wait for MC idle timed out!\n");
}
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{
        u32 tmp;
        int chansize, numchan;
        int r;

        tmp = RREG32(mmMC_ARB_RAMCFG);
        if (tmp & (1 << 11)) {
                chansize = 16;
        } else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
                chansize = 64;
        } else {
                chansize = 32;
        }
        tmp = RREG32(mmMC_SHARED_CHMAP);
        switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
        case 0:
        default:
                numchan = 1;
                break;
        case 1:
                numchan = 2;
                break;
        case 2:
                numchan = 4;
                break;
        case 3:
                numchan = 8;
                break;
        case 4:
                numchan = 3;
                break;
        case 5:
                numchan = 6;
                break;
        case 6:
                numchan = 10;
                break;
        case 7:
                numchan = 12;
                break;
        case 8:
                numchan = 16;
                break;
        }
        adev->gmc.vram_width = numchan * chansize;
        /* size in MB on si */
        adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_device_resize_fb_bar(adev);
                if (r)
                        return r;
        }
        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
        adev->gmc.visible_vram_size = adev->gmc.aper_size;

        /* set the gart size */
        if (amdgpu_gart_size == -1) {
                switch (adev->asic_type) {
                case CHIP_HAINAN:    /* no MM engines */
                default:
                        adev->gmc.gart_size = 256ULL << 20;
                        break;
                case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
                case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
                case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
                case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
                        adev->gmc.gart_size = 1024ULL << 20;
                        break;
                }
        } else {
                adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
        }

        gmc_v6_0_vram_gtt_location(adev, &adev->gmc);

        return 0;
}
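
/**
 * gmc_v6_0_flush_gpu_tlb - flush the VM page table TLB
 *
 * @adev: amdgpu_device pointer
 * @vmid: VM context to flush
 * @vmhub: hub to flush (unused on SI, which has a single hub)
 * @flush_type: type of flush (unused on SI)
 *
 * Flush the TLB for the requested VMID via MMIO.
 */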
static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                   uint32_t vmhub, uint32_t flush_type)
{
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                            unsigned vmid, uint64_t pd_addr)
{
        uint32_t reg;

        /* write new base address */
        if (vmid < 8)
                reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
        else
                reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
        amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

        /* bits 0-15 are the VM contexts0-15 */
        amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

        return pd_addr;
}
static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
                                uint64_t *addr, uint64_t *flags)
{
        BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
                                struct amdgpu_bo_va_mapping *mapping,
                                uint64_t *flags)
{
        *flags &= ~AMDGPU_PTE_EXECUTABLE;
        *flags &= ~AMDGPU_PTE_PRT;
}
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
                                              bool value)
{
        u32 tmp;

        tmp = RREG32(mmVM_CONTEXT1_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                            RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                            DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                            PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                            VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                            READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                            WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
/**
 * gmc_v6_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
        u32 tmp;

        if (enable && !adev->gmc.prt_warning) {
                dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
                adev->gmc.prt_warning = true;
        }

        tmp = RREG32(mmVM_PRT_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
                            CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
                            enable);
        tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
                            TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
                            enable);
        tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
                            L2_CACHE_STORE_INVALID_ENTRIES,
                            enable);
        tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
                            L1_TLB_STORE_INVALID_ENTRIES,
                            enable);
        WREG32(mmVM_PRT_CNTL, tmp);

        if (enable) {
                uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
                uint32_t high = adev->vm_manager.max_pfn -
                        (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

                WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
                WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
                WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
                WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
                WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
                WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
                WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
                WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
        } else {
                WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
                WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
                WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
                WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
                WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
                WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
                WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
                WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
        }
}
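
/**
 * gmc_v6_0_gart_enable - set up and enable the GART
 *
 * @adev: amdgpu_device pointer
 *
 * Pin the GART page table in VRAM, program the TLB and L2 cache
 * control registers, point context0 at the GART table and enable
 * contexts 1-15 for GPUVM with fault reporting.
 */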
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
        uint64_t table_addr;
        u32 field;
        int r, i;

        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = amdgpu_gart_table_vram_pin(adev);
        if (r)
                return r;

        table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

        /* Setup TLB control */
        WREG32(mmMC_VM_MX_L1_TLB_CNTL,
               MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
               MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
               MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
               MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
               (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
        /* Setup L2 cache */
        WREG32(mmVM_L2_CNTL,
               VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
               VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
               VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
               VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
               (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
               (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
        WREG32(mmVM_L2_CNTL2,
               VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
               VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
        /* set bits for the L2 cache */
        field = adev->vm_manager.fragment_size;
        WREG32(mmVM_L2_CNTL3,
               VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
               (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
               (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
        /* setup context0 */
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
        WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(adev->dummy_page_addr >> 12));
        WREG32(mmVM_CONTEXT0_CNTL2, 0);
        WREG32(mmVM_CONTEXT0_CNTL,
               VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
               (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
               VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);

        /* empty context1-15 */
        /* set vm size, must be a multiple of 4 */
        WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
        WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
        /* Assign the pt base to something valid for now; the pts used for
         * the VMs are determined by the application and setup and assigned
         * on the fly in the vm part of radeon_gart.c
         */
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
                               table_addr >> 12);
                else
                        WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
                               table_addr >> 12);
        }

        /* enable context1-15 */
        WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(adev->dummy_page_addr >> 12));
        WREG32(mmVM_CONTEXT1_CNTL2, 4);
        WREG32(mmVM_CONTEXT1_CNTL,
               VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
               (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
               ((adev->vm_manager.block_size - 9)
                << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
                gmc_v6_0_set_fault_enable_default(adev, false);
        else
                gmc_v6_0_set_fault_enable_default(adev, true);

        gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
        adev->gart.ready = true;
        return 0;
}
static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.bo) {
                dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
                return 0;
        }
        r = amdgpu_gart_init(adev);
        if (r)
                return r;
        adev->gart.table_size = adev->gart.num_gpu_pages * 8;
        adev->gart.gart_pte_flags = 0;
        return amdgpu_gart_table_vram_alloc(adev);
}
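
/**
 * gmc_v6_0_gart_disable - disable the GART
 *
 * @adev: amdgpu_device pointer
 *
 * Save the per-context page table base addresses, disable all VM
 * contexts and the TLB, and unpin the GART table.
 */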
static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
{
        u32 reg;
        int i;

        for (i = 1; i < 16; ++i) {
                if (i < 8)
                        reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i;
                else
                        reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
                adev->vm_manager.saved_table_addr[i] = RREG32(reg);
        }

        /* Disable all tables */
        WREG32(mmVM_CONTEXT0_CNTL, 0);
        WREG32(mmVM_CONTEXT1_CNTL, 0);
        /* Setup TLB control */
        WREG32(mmMC_VM_MX_L1_TLB_CNTL,
               MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
               (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
        /* Setup L2 cache */
        WREG32(mmVM_L2_CNTL,
               VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
               VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
               (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
               (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
        WREG32(mmVM_L2_CNTL2, 0);
        WREG32(mmVM_L2_CNTL3,
               VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
               (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
        amdgpu_gart_table_vram_unpin(adev);
}
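
/**
 * gmc_v6_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: client id
 *
 * Decode the VM fault status and client and print it.
 */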
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
                                     u32 status, u32 addr, u32 mc_client)
{
        u32 mc_id;
        u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
        u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
                                        PROTECTIONS);
        char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
                (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

        mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
                              MEMORY_CLIENT_ID);

        dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
                protections, vmid, addr,
                REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
                              MEMORY_CLIENT_RW) ?
                "write" : "read", block, mc_client, mc_id);
}
static const u32 mc_cg_registers[] = {
        mmMC_HUB_MISC_HUB_CG,
        mmMC_HUB_MISC_SIP_CG,
        mmMC_HUB_MISC_VM_CG,
        mmMC_XPB_CLK_GAT,
        mmATC_MISC_CG,
        mmMC_CITF_MISC_WR_CG,
        mmMC_CITF_MISC_RD_CG,
        mmMC_CITF_MISC_VM_CG,
        mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
        MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
        MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
        MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
        MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
        ATC_MISC_CG__MEM_LS_ENABLE_MASK,
        MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
        MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
        MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
        VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
        MC_HUB_MISC_HUB_CG__ENABLE_MASK,
        MC_HUB_MISC_SIP_CG__ENABLE_MASK,
        MC_HUB_MISC_VM_CG__ENABLE_MASK,
        MC_XPB_CLK_GAT__ENABLE_MASK,
        ATC_MISC_CG__ENABLE_MASK,
        MC_CITF_MISC_WR_CG__ENABLE_MASK,
        MC_CITF_MISC_RD_CG__ENABLE_MASK,
        MC_CITF_MISC_VM_CG__ENABLE_MASK,
        VM_L2_CG__ENABLE_MASK,
};
static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
                                  bool enable)
{
        int i;
        u32 orig, data;

        for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
                orig = data = RREG32(mc_cg_registers[i]);
                if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
                        data |= mc_cg_ls_en[i];
                else
                        data &= ~mc_cg_ls_en[i];
                if (data != orig)
                        WREG32(mc_cg_registers[i], data);
        }
}

static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
                                    bool enable)
{
        int i;
        u32 orig, data;

        for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
                orig = data = RREG32(mc_cg_registers[i]);
                if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
                        data |= mc_cg_en[i];
                else
                        data &= ~mc_cg_en[i];
                if (data != orig)
                        WREG32(mc_cg_registers[i], data);
        }
}
static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
                                     bool enable)
{
        u32 orig, data;

        orig = data = RREG32_PCIE(ixPCIE_CNTL2);

        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
                data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
                data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
                data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
                data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
        } else {
                data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
                data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
                data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
                data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
        }

        if (orig != data)
                WREG32_PCIE(ixPCIE_CNTL2, data);
}
static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
                                     bool enable)
{
        u32 orig, data;

        orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
                data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
        else
                data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

        if (orig != data)
                WREG32(mmHDP_HOST_PATH_CNTL, data);
}
static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
                                   bool enable)
{
        u32 orig, data;

        orig = data = RREG32(mmHDP_MEM_POWER_LS);

        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
                data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
        else
                data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

        if (orig != data)
                WREG32(mmHDP_MEM_POWER_LS, data);
}
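
/**
 * gmc_v6_0_convert_vram_type - translate the MC_SEQ_MISC0 memory type
 *
 * @mc_seq_vram_type: memory type field read from MC_SEQ_MISC0
 *
 * Map the hardware memory type encoding to the AMDGPU_VRAM_TYPE_*
 * enumeration used by the rest of the driver.
 */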
static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
        switch (mc_seq_vram_type) {
        case MC_SEQ_MISC0__MT__GDDR1:
                return AMDGPU_VRAM_TYPE_GDDR1;
        case MC_SEQ_MISC0__MT__DDR2:
                return AMDGPU_VRAM_TYPE_DDR2;
        case MC_SEQ_MISC0__MT__GDDR3:
                return AMDGPU_VRAM_TYPE_GDDR3;
        case MC_SEQ_MISC0__MT__GDDR4:
                return AMDGPU_VRAM_TYPE_GDDR4;
        case MC_SEQ_MISC0__MT__GDDR5:
                return AMDGPU_VRAM_TYPE_GDDR5;
        case MC_SEQ_MISC0__MT__DDR3:
                return AMDGPU_VRAM_TYPE_DDR3;
        default:
                return AMDGPU_VRAM_TYPE_UNKNOWN;
        }
}
static int gmc_v6_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v6_0_set_gmc_funcs(adev);
        gmc_v6_0_set_irq_funcs(adev);

        return 0;
}
static int gmc_v6_0_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_bo_late_init(adev);

        if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
                return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
        else
                return 0;
}
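
/**
 * gmc_v6_0_get_vbios_fb_size - get the size of the pre-OS framebuffer
 *
 * @adev: amdgpu_device pointer
 *
 * Work out how much VRAM the VBIOS is scanning out from, so that it can
 * be reserved until the driver takes over the display.
 */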
static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
        u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
        unsigned size;

        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
        } else {
                u32 viewport = RREG32(mmVIEWPORT_SIZE);

                size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
                        REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
                        4);
        }
        /* return 0 if the pre-OS buffer uses up most of vram */
        if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
                return 0;
        return size;
}
static int gmc_v6_0_sw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->num_vmhubs = 1;

        if (adev->flags & AMD_IS_APU) {
                adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
        } else {
                u32 tmp = RREG32(mmMC_SEQ_MISC0);

                tmp &= MC_SEQ_MISC0__MT__MASK;
                adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
        }

        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
        if (r)
                return r;

        amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

        adev->gmc.mc_mask = 0xffffffffffULL;

        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
        if (r) {
                dev_warn(adev->dev, "No suitable DMA available.\n");
                return r;
        }
        adev->need_swiotlb = drm_need_swiotlb(44);

        r = gmc_v6_0_init_microcode(adev);
        if (r) {
                dev_err(adev->dev, "Failed to load mc firmware!\n");
                return r;
        }

        r = gmc_v6_0_mc_init(adev);
        if (r)
                return r;

        adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev);

        r = amdgpu_bo_init(adev);
        if (r)
                return r;

        r = gmc_v6_0_gart_init(adev);
        if (r)
                return r;

        /*
         * number of VMs
         * VMID 0 is reserved for System
         * amdgpu graphics/compute will use VMIDs 1-7
         * amdkfd will use VMIDs 8-15
         */
        adev->vm_manager.first_kfd_vmid = 8;
        amdgpu_vm_manager_init(adev);

        /* base offset of vram pages */
        if (adev->flags & AMD_IS_APU) {
                u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

                tmp <<= 22;
                adev->vm_manager.vram_base_offset = tmp;
        } else {
                adev->vm_manager.vram_base_offset = 0;
        }

        return 0;
}
static int gmc_v6_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        amdgpu_gart_table_vram_free(adev);
        amdgpu_bo_fini(adev);
        amdgpu_gart_fini(adev);
        release_firmware(adev->gmc.fw);
        adev->gmc.fw = NULL;

        return 0;
}
static int gmc_v6_0_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v6_0_mc_program(adev);

        if (!(adev->flags & AMD_IS_APU)) {
                r = gmc_v6_0_mc_load_microcode(adev);
                if (r) {
                        dev_err(adev->dev, "Failed to load MC firmware!\n");
                        return r;
                }
        }

        r = gmc_v6_0_gart_enable(adev);
        if (r)
                return r;

        return 0;
}
static int gmc_v6_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
        gmc_v6_0_gart_disable(adev);

        return 0;
}
static int gmc_v6_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v6_0_hw_fini(adev);

        return 0;
}
static int gmc_v6_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = gmc_v6_0_hw_init(adev);
        if (r)
                return r;

        amdgpu_vmid_reset_all(adev);

        return 0;
}
static bool gmc_v6_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
                   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
                return false;

        return true;
}
static int gmc_v6_0_wait_for_idle(void *handle)
{
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (gmc_v6_0_is_idle(handle))
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}
static int gmc_v6_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
                                                SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

        if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
                   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
                if (!(adev->flags & AMD_IS_APU))
                        srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
                                                        SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
        }

        if (srbm_soft_reset) {
                gmc_v6_0_mc_stop(adev);
                if (gmc_v6_0_wait_for_idle(adev))
                        dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                gmc_v6_0_mc_resume(adev);
                udelay(50);
        }

        return 0;
}
static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *src,
                                             unsigned type,
                                             enum amdgpu_interrupt_state state)
{
        u32 tmp;
        u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                tmp = RREG32(mmVM_CONTEXT0_CNTL);
                tmp &= ~bits;
                WREG32(mmVM_CONTEXT0_CNTL, tmp);
                tmp = RREG32(mmVM_CONTEXT1_CNTL);
                tmp &= ~bits;
                WREG32(mmVM_CONTEXT1_CNTL, tmp);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                tmp = RREG32(mmVM_CONTEXT0_CNTL);
                tmp |= bits;
                WREG32(mmVM_CONTEXT0_CNTL, tmp);
                tmp = RREG32(mmVM_CONTEXT1_CNTL);
                tmp |= bits;
                WREG32(mmVM_CONTEXT1_CNTL, tmp);
                break;
        default:
                break;
        }

        return 0;
}
static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        u32 addr, status;

        addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
        status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
        WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

        if (!addr && !status)
                return 0;

        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
                gmc_v6_0_set_fault_enable_default(adev, false);

        if (printk_ratelimit()) {
                dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
                        entry->src_id, entry->src_data[0]);
                dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                        addr);
                dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                        status);
                gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
        }

        return 0;
}
static int gmc_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        return 0;
}

static int gmc_v6_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        return 0;
}
static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
        .name = "gmc_v6_0",
        .early_init = gmc_v6_0_early_init,
        .late_init = gmc_v6_0_late_init,
        .sw_init = gmc_v6_0_sw_init,
        .sw_fini = gmc_v6_0_sw_fini,
        .hw_init = gmc_v6_0_hw_init,
        .hw_fini = gmc_v6_0_hw_fini,
        .suspend = gmc_v6_0_suspend,
        .resume = gmc_v6_0_resume,
        .is_idle = gmc_v6_0_is_idle,
        .wait_for_idle = gmc_v6_0_wait_for_idle,
        .soft_reset = gmc_v6_0_soft_reset,
        .set_clockgating_state = gmc_v6_0_set_clockgating_state,
        .set_powergating_state = gmc_v6_0_set_powergating_state,
};
static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
        .emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
        .set_prt = gmc_v6_0_set_prt,
        .get_vm_pde = gmc_v6_0_get_vm_pde,
        .get_vm_pte = gmc_v6_0_get_vm_pte,
};
static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
        .set = gmc_v6_0_vm_fault_interrupt_state,
        .process = gmc_v6_0_process_interrupt,
};
static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
{
        adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
}
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->gmc.vm_fault.num_types = 1;
        adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}
const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 6,
        .minor = 0,
        .rev = 0,
        .funcs = &gmc_v6_0_ip_funcs,
};