/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_0.h"
#include "umc_v6_1.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION				0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX			2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L
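
/*
 * Illustrative note: REG_GET_FIELD() combines the mask and shift macros
 * above, so
 *     REG_GET_FIELD(v, HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH)
 * expands to ((v & 0x00003FFFL) >> 0x0), i.e. the viewport width lives in
 * bits 13:0 and, via the _HEIGHT mask/shift pair, the height in bits 29:16
 * of the same register. gmc_v9_0_get_vbios_fb_size() below relies on this.
 */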
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
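
/*
 * The three tables below appear to list one control, one control-mask and
 * one status register per UMC channel: the base address steps by 0x40000
 * between UMC instances and by 0x800 between the four channels within an
 * instance, giving 32 entries per table.
 */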
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};
static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	/* Devices newer than VEGA10/12 shall have these programming
	 * sequences performed by PSP BL */
	if (adev->asic_type >= CHIP_VEGA20)
		return 0;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (retry_fault && !in_interrupt() &&
	    amdgpu_vm_handle_fault(adev, entry->pasid, addr))
		return 1; /* This also prevents sending it to KFD */

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, CID));
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, RW));
		}
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
	}
}
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid*/
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}
/**
 * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
				       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)) &&
		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
}
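
/*
 * Spelled out, the condition above means: the invalidate semaphore is only
 * used on the MMHUBs, never under SR-IOV, and is additionally skipped on
 * Picasso (an APU with AMD_APU_IS_PICASSO set but not AMD_APU_IS_RAVEN2),
 * which is what the double negation encodes.
 */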
static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
	const unsigned eng = 17;
	u32 j, inv_req, inv_req2, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	if (adev->gmc.xgmi.num_physical_nodes &&
	    adev->asic_type == CHIP_VEGA20) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	} else {
		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
		inv_req2 = 0;
	}

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    !adev->in_gpu_reset) {
		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		return;
	}

	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle; acquire the semaphore before invalidation
	 * and release it afterwards to avoid entering a power-gated state
	 * in between.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
					    hub->eng_distance * eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	do {
		WREG32_NO_KIQ(hub->vm_inv_eng0_req +
			      hub->eng_distance * eng, inv_req);

		/*
		 * Issue a dummy read to wait for the ACK register to
		 * be cleared to avoid a false ACK due to the new fast
		 * GRBM interface.
		 */
		if (vmhub == AMDGPU_GFXHUB_0)
			RREG32_NO_KIQ(hub->vm_inv_eng0_req +
				      hub->eng_distance * eng);

		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}

		inv_req = inv_req2;
		inv_req2 = 0;
	} while (inv_req);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
			      hub->eng_distance * eng, 0);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (adev->in_gpu_reset)
		return -EIO;

	if (ring->sched.ready) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
				       adev->asic_type == CHIP_VEGA20);
		/* 2 dwords flush + 8 dwords fence */
		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;

		if (vega20_xgmi_wa)
			ndw += kiq->pmf->invalidate_tlbs_size;

		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, ndw);
		if (vega20_xgmi_wa)
			kiq->pmf->kiq_invalidate_tlbs(ring,
						      pasid, 2, all_hub);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			DRM_ERROR("wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {

		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
				&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v9_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
			} else {
				gmc_v9_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle; acquire the semaphore before invalidation
	 * and release it afterwards to avoid entering a power-gated state
	 * in between.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}
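
/*
 * The emit path above mirrors the MMIO sequence in gmc_v9_0_flush_gpu_tlb()
 * (semaphore acquire, page-table-base write, invalidate request/ack,
 * semaphore release), but records the register accesses as ring packets so
 * the flush executes in order with the submitted command stream.
 */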
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}
/*
 * PTE format on VEGA 10:
 * 47:12 4k physical page base address
 * (the remaining bits carry control flags such as valid, system, snooped,
 * execute, read, write, fragment and mtype)
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 47:6 physical base address of PD or PTE
 * (the remaining bits carry control flags, including valid and system)
 */
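
/*
 * Worked example (illustrative): a 4 KiB page at physical address
 * 0x123456000 contributes (0x123456000 & 0x0000FFFFFFFFF000) to bits 47:12
 * of the PTE; control bits such as the mtype are then OR'ed on top, e.g.
 * via AMDGPU_PTE_MTYPE_VG10() in gmc_v9_0_map_mtype() below.
 */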
static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
}
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (adev->asic_type == CHIP_ARCTURUS &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;
}
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case CHIP_VEGA20:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	default:
		break;
	}
}
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	default:
		break;
	}
}
static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}
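
/*
 * Both apertures set up above are fixed 4 GiB windows ((4ULL << 30) bytes)
 * in the shared virtual address space; e.g. the shared aperture spans
 * 0x2000000000000000 through 0x20000000ffffffff inclusive.
 */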
static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to first 8M of VRAM on S3 resume,
	 * this overrides GART which by default gets placed in first 8M and
	 * causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
		r = amdgpu_atomfirmware_mem_ecc_supported(adev);
		if (!r) {
			DRM_INFO("ECC is not present.\n");
			if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
		} else {
			DRM_INFO("ECC is active.\n");
		}

		r = amdgpu_atomfirmware_sram_ecc_supported(adev);
		if (!r)
			DRM_INFO("SRAM ECC is not present.\n");
		else
			DRM_INFO("SRAM ECC is active.\n");
	}

	if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
		adev->mmhub.funcs->reset_ras_error_count(adev);

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (adev->asic_type == CHIP_ARCTURUS)
		base = mmhub_v9_4_get_fb_location(adev);
	else if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}
static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
		&vram_width, &vram_type, &vram_vendor);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on
		 * RAVEN, and the DF related registers are not readable;
		 * hardcoding seems to be the only way to set the correct
		 * vram_width
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df.funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);
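
	/*
	 * Sizing note: DMA_BIT_MASK(44) limits device DMA to the low
	 * 2^44 bytes (16 TiB) of system address space; drm_need_swiotlb(44)
	 * then enables swiotlb bounce buffering only if the platform can
	 * hand out bus addresses above that limit.
	 */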
	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1..n-1
	 * amdkfd will use VMIDs n..15
	 *
	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
	 * for video processing.
	 */
	adev->vm_manager.first_kfd_vmid =
		adev->asic_type == CHIP_ARCTURUS ? 3 : 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *stolen_vga_buf;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
/**
 * gmc_v9_0_restore_registers - restores regs
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values, saved at suspend.
 */
static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_RAVEN)
		WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
}
/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS)
		r = mmhub_v9_4_gart_enable(adev);
	else
		r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}
static int gmc_v9_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int r, i;
	u32 tmp;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		if (adev->asic_type != CHIP_ARCTURUS) {
			/* Lockout access through VGA aperture*/
			WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

			/* disable VGA render */
			WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		}
	}

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* TODO for renoir */
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	case CHIP_ARCTURUS:
		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
		break;
	default:
		break;
	}

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP.*/
	adev->nbio.funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	if (!amdgpu_sriov_vf(adev)) {
		gfxhub_v1_0_set_fault_enable_default(adev, value);
		if (adev->asic_type == CHIP_ARCTURUS)
			mmhub_v9_4_set_fault_enable_default(adev, value);
		else
			mmhub_v1_0_set_fault_enable_default(adev, value);
	}
	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}
/**
 * gmc_v9_0_save_registers - saves regs
 *
 * @adev: amdgpu_device pointer
 *
 * This saves potential register values that should be
 * restored upon resume
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_RAVEN)
		adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_gart_disable(adev);
	else
		mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
static int gmc_v9_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_fini(adev);
	if (r)
		return r;

	gmc_v9_0_save_registers(adev);

	return 0;
}
static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_restore_registers(adev);
	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}
static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}
static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_clockgating(adev, state);
	else
		mmhub_v1_0_set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}
static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_get_clockgating(adev, flags);
	else
		mmhub_v1_0_get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}
static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};