2 * Copyright 2019 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include "mmhub_v2_0.h"
27 #include "mmhub/mmhub_2_0_0_offset.h"
28 #include "mmhub/mmhub_2_0_0_sh_mask.h"
29 #include "mmhub/mmhub_2_0_0_default.h"
30 #include "navi10_enum.h"
32 #include "soc15_common.h"
34 #define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid 0x064d
35 #define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid_BASE_IDX 0
36 #define mmDAGB0_CNTL_MISC2_Sienna_Cichlid 0x0070
37 #define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX 0
/* UTCL2 client-ID -> human-readable name table for Navi1x ASICs,
 * indexed as [cid][rw] (second index presumably 0 = read, 1 = write,
 * matching the 'rw' lookup in the fault-status printer below — confirm).
 * NOTE(review): the initializer entries are not visible in this chunk.
 */
39 static const char *mmhub_client_ids_navi1x
[][2] = {
/* UTCL2 client-ID -> name table for Sienna Cichlid / Navy Flounder,
 * indexed as [cid][rw] like the Navi1x table above.
 * NOTE(review): the initializer entries are not visible in this chunk.
 */
66 static const char *mmhub_client_ids_sienna_cichlid
[][2] = {
/* Build the MMVM_INVALIDATE_ENG0_REQ register value for a legacy-mode
 * TLB invalidation of @vmid: sets the per-VMID request bit, the flush
 * type, and requests invalidation of L1 PTEs and all L2 PTE/PDE levels;
 * the protection-fault status address is left uncleared (field = 0).
 */
96 static uint32_t mmhub_v2_0_get_invalidate_req(unsigned int vmid
,
101 /* invalidate using legacy mode on vmid*/
102 req
= REG_SET_FIELD(req
, MMVM_INVALIDATE_ENG0_REQ
,
103 PER_VMID_INVALIDATE_REQ
, 1 << vmid
);
104 req
= REG_SET_FIELD(req
, MMVM_INVALIDATE_ENG0_REQ
, FLUSH_TYPE
, flush_type
);
105 req
= REG_SET_FIELD(req
, MMVM_INVALIDATE_ENG0_REQ
, INVALIDATE_L2_PTES
, 1);
106 req
= REG_SET_FIELD(req
, MMVM_INVALIDATE_ENG0_REQ
, INVALIDATE_L2_PDE0
, 1);
107 req
= REG_SET_FIELD(req
, MMVM_INVALIDATE_ENG0_REQ
, INVALIDATE_L2_PDE1
, 1);
108 req
= REG_SET_FIELD(req
, MMVM_INVALIDATE_ENG0_REQ
, INVALIDATE_L2_PDE2
, 1);
109 req
= REG_SET_FIELD(req
, MMVM_INVALIDATE_ENG0_REQ
, INVALIDATE_L1_PTES
, 1);
110 req
= REG_SET_FIELD(req
, MMVM_INVALIDATE_ENG0_REQ
,
111 CLEAR_PROTECTION_FAULT_STATUS_ADDR
, 0);
/* Decode and print (via dev_err) an MMVM_L2_PROTECTION_FAULT_STATUS
 * value: extracts the client ID (CID) and read/write bit (RW), maps the
 * CID to a name using the per-ASIC client-id tables above, then dumps
 * the MORE_FAULTS, WALKER_ERROR, PERMISSION_FAULTS and MAPPING_ERROR
 * fields.  An unmatched CID prints as "unknown".
 */
117 mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device
*adev
,
121 const char *mmhub_cid
= NULL
;
123 cid
= REG_GET_FIELD(status
,
124 MMVM_L2_PROTECTION_FAULT_STATUS
, CID
);
125 rw
= REG_GET_FIELD(status
,
126 MMVM_L2_PROTECTION_FAULT_STATUS
, RW
);
129 "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
131 switch (adev
->asic_type
) {
135 mmhub_cid
= mmhub_client_ids_navi1x
[cid
][rw
];
137 case CHIP_SIENNA_CICHLID
:
138 case CHIP_NAVY_FLOUNDER
:
139 mmhub_cid
= mmhub_client_ids_sienna_cichlid
[cid
][rw
];
145 dev_err(adev
->dev
, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
146 mmhub_cid
? mmhub_cid
: "unknown", cid
);
147 dev_err(adev
->dev
, "\t MORE_FAULTS: 0x%lx\n",
148 REG_GET_FIELD(status
,
149 MMVM_L2_PROTECTION_FAULT_STATUS
, MORE_FAULTS
));
150 dev_err(adev
->dev
, "\t WALKER_ERROR: 0x%lx\n",
151 REG_GET_FIELD(status
,
152 MMVM_L2_PROTECTION_FAULT_STATUS
, WALKER_ERROR
));
153 dev_err(adev
->dev
, "\t PERMISSION_FAULTS: 0x%lx\n",
154 REG_GET_FIELD(status
,
155 MMVM_L2_PROTECTION_FAULT_STATUS
, PERMISSION_FAULTS
));
156 dev_err(adev
->dev
, "\t MAPPING_ERROR: 0x%lx\n",
157 REG_GET_FIELD(status
,
158 MMVM_L2_PROTECTION_FAULT_STATUS
, MAPPING_ERROR
));
159 dev_err(adev
->dev
, "\t RW: 0x%x\n", rw
);
/* Program the page-table base address for @vmid's VM context on
 * MMHUB 0.  @page_table_base is split into its lower and upper 32 bits
 * and written to the CONTEXT0 base-address LO32/HI32 registers, offset
 * by hub->ctx_addr_distance per VMID.
 */
162 static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device
*adev
, uint32_t vmid
,
163 uint64_t page_table_base
)
165 struct amdgpu_vmhub
*hub
= &adev
->vmhub
[AMDGPU_MMHUB_0
];
167 WREG32_SOC15_OFFSET(MMHUB
, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
,
168 hub
->ctx_addr_distance
* vmid
,
169 lower_32_bits(page_table_base
));
171 WREG32_SOC15_OFFSET(MMHUB
, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
,
172 hub
->ctx_addr_distance
* vmid
,
173 upper_32_bits(page_table_base
));
/* Program the GART aperture for VM context 0: point the context at the
 * GART page directory (amdgpu_gmc_pd_addr of the GART BO) and set the
 * context's start/end page-table addresses from gmc.gart_start and
 * gmc.gart_end.  Addresses are 48-bit page numbers, split as bits
 * [43:12] (LO32) and [47:44] (HI32) via the >>12 / >>44 shifts.
 */
176 static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device
*adev
)
178 uint64_t pt_base
= amdgpu_gmc_pd_addr(adev
->gart
.bo
);
180 mmhub_v2_0_setup_vm_pt_regs(adev
, 0, pt_base
);
182 WREG32_SOC15(MMHUB
, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
,
183 (u32
)(adev
->gmc
.gart_start
>> 12));
184 WREG32_SOC15(MMHUB
, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
,
185 (u32
)(adev
->gmc
.gart_start
>> 44));
187 WREG32_SOC15(MMHUB
, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
,
188 (u32
)(adev
->gmc
.gart_end
>> 12));
189 WREG32_SOC15(MMHUB
, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
,
190 (u32
)(adev
->gmc
.gart_end
>> 44));
/* Program the MMHUB system aperture: disables AGP (BASE/TOP = 0, BOT =
 * 0x00FFFFFF), and — except under SR-IOV, where the PF owns these
 * registers — sets the system aperture low/high addresses from the VRAM
 * range (>>18, i.e. 256KB granularity), the default page address
 * (scratch page relative to vram_base_offset), the protection-fault
 * default address from the dummy page, and enables the
 * ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY fault-control bit.
 */
193 static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device
*adev
)
199 WREG32_SOC15(MMHUB
, 0, mmMMMC_VM_AGP_BASE
, 0);
200 WREG32_SOC15(MMHUB
, 0, mmMMMC_VM_AGP_TOP
, 0);
201 WREG32_SOC15(MMHUB
, 0, mmMMMC_VM_AGP_BOT
, 0x00FFFFFF);
203 if (!amdgpu_sriov_vf(adev
)) {
204 /* Program the system aperture low logical page number. */
205 WREG32_SOC15(MMHUB
, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR
,
206 adev
->gmc
.vram_start
>> 18);
207 WREG32_SOC15(MMHUB
, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR
,
208 adev
->gmc
.vram_end
>> 18);
211 /* Set default page address. */
212 value
= adev
->vram_scratch
.gpu_addr
- adev
->gmc
.vram_start
+
213 adev
->vm_manager
.vram_base_offset
;
214 WREG32_SOC15(MMHUB
, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
,
216 WREG32_SOC15(MMHUB
, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
,
219 /* Program "protection fault". */
220 WREG32_SOC15(MMHUB
, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
,
221 (u32
)(adev
->dummy_page_addr
>> 12));
222 WREG32_SOC15(MMHUB
, 0, mmMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
,
223 (u32
)((u64
)adev
->dummy_page_addr
>> 44));
225 tmp
= RREG32_SOC15(MMHUB
, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2
);
226 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL2
,
227 ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY
, 1);
228 WREG32_SOC15(MMHUB
, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL2
, tmp
);
/* Configure the MMHUB L1 TLB (MMMC_VM_MX_L1_TLB_CNTL): enable the L1
 * TLB and the advanced driver model, set system access mode to 3,
 * disable unmapped system-aperture access and ECO bits, and mark system
 * memory accesses as uncached (MTYPE_UC).
 */
231 static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device
*adev
)
235 /* Setup TLB control */
236 tmp
= RREG32_SOC15(MMHUB
, 0, mmMMMC_VM_MX_L1_TLB_CNTL
);
238 tmp
= REG_SET_FIELD(tmp
, MMMC_VM_MX_L1_TLB_CNTL
, ENABLE_L1_TLB
, 1);
239 tmp
= REG_SET_FIELD(tmp
, MMMC_VM_MX_L1_TLB_CNTL
, SYSTEM_ACCESS_MODE
, 3);
240 tmp
= REG_SET_FIELD(tmp
, MMMC_VM_MX_L1_TLB_CNTL
,
241 ENABLE_ADVANCED_DRIVER_MODEL
, 1);
242 tmp
= REG_SET_FIELD(tmp
, MMMC_VM_MX_L1_TLB_CNTL
,
243 SYSTEM_APERTURE_UNMAPPED_ACCESS
, 0);
244 tmp
= REG_SET_FIELD(tmp
, MMMC_VM_MX_L1_TLB_CNTL
, ECO_BITS
, 0);
245 tmp
= REG_SET_FIELD(tmp
, MMMC_VM_MX_L1_TLB_CNTL
,
246 MTYPE
, MTYPE_UC
); /* UC, uncached */
248 WREG32_SOC15(MMHUB
, 0, mmMMMC_VM_MX_L1_TLB_CNTL
, tmp
);
251 static void mmhub_v2_0_init_cache_regs(struct amdgpu_device
*adev
)
255 /* These registers are not accessible to VF-SRIOV.
256 * The PF will program them instead.
258 if (amdgpu_sriov_vf(adev
))
262 tmp
= RREG32_SOC15(MMHUB
, 0, mmMMVM_L2_CNTL
);
263 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL
, ENABLE_L2_CACHE
, 1);
264 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL
, ENABLE_L2_FRAGMENT_PROCESSING
, 0);
265 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL
,
266 ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY
, 1);
267 /* XXX for emulation, Refer to closed source code.*/
268 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL
, L2_PDE0_CACHE_TAG_GENERATION_MODE
,
270 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL
, PDE_FAULT_CLASSIFICATION
, 0);
271 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL
, CONTEXT1_IDENTITY_ACCESS_MODE
, 1);
272 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL
, IDENTITY_MODE_FRAGMENT_SIZE
, 0);
273 WREG32_SOC15(MMHUB
, 0, mmMMVM_L2_CNTL
, tmp
);
275 tmp
= RREG32_SOC15(MMHUB
, 0, mmMMVM_L2_CNTL2
);
276 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL2
, INVALIDATE_ALL_L1_TLBS
, 1);
277 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL2
, INVALIDATE_L2_CACHE
, 1);
278 WREG32_SOC15(MMHUB
, 0, mmMMVM_L2_CNTL2
, tmp
);
280 tmp
= mmMMVM_L2_CNTL3_DEFAULT
;
281 if (adev
->gmc
.translate_further
) {
282 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL3
, BANK_SELECT
, 12);
283 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL3
,
284 L2_CACHE_BIGK_FRAGMENT_SIZE
, 9);
286 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL3
, BANK_SELECT
, 9);
287 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL3
,
288 L2_CACHE_BIGK_FRAGMENT_SIZE
, 6);
290 WREG32_SOC15(MMHUB
, 0, mmMMVM_L2_CNTL3
, tmp
);
292 tmp
= mmMMVM_L2_CNTL4_DEFAULT
;
293 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL4
, VMC_TAP_PDE_REQUEST_PHYSICAL
, 0);
294 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL4
, VMC_TAP_PTE_REQUEST_PHYSICAL
, 0);
295 WREG32_SOC15(MMHUB
, 0, mmMMVM_L2_CNTL4
, tmp
);
297 tmp
= mmMMVM_L2_CNTL5_DEFAULT
;
298 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL5
, L2_CACHE_SMALLK_FRAGMENT_SIZE
, 0);
299 WREG32_SOC15(GC
, 0, mmMMVM_L2_CNTL5
, tmp
);
/* Enable VM context 0 (the kernel/GART "system domain"): context
 * enabled, page-table depth 0 (flat), and no retry on
 * permission/invalid-page faults.
 */
302 static void mmhub_v2_0_enable_system_domain(struct amdgpu_device
*adev
)
306 tmp
= RREG32_SOC15(MMHUB
, 0, mmMMVM_CONTEXT0_CNTL
);
307 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT0_CNTL
, ENABLE_CONTEXT
, 1);
308 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT0_CNTL
, PAGE_TABLE_DEPTH
, 0);
309 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT0_CNTL
,
310 RETRY_PERMISSION_OR_INVALID_PAGE_FAULT
, 0);
311 WREG32_SOC15(MMHUB
, 0, mmMMVM_CONTEXT0_CNTL
, tmp
);
/* Disable the context1 identity aperture by programming its address
 * range registers (HIGH_ADDR pair explicitly zeroed; the values written
 * to the LOW_ADDR and PHYSICAL_OFFSET registers are on original lines
 * not visible in this chunk).  Skipped under SR-IOV: these registers
 * are PF-programmed only.
 */
314 static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device
*adev
)
316 /* These registers are not accessible to VF-SRIOV.
317 * The PF will program them instead.
319 if (amdgpu_sriov_vf(adev
))
322 WREG32_SOC15(MMHUB
, 0,
323 mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
,
325 WREG32_SOC15(MMHUB
, 0,
326 mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
,
329 WREG32_SOC15(MMHUB
, 0,
330 mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
, 0);
331 WREG32_SOC15(MMHUB
, 0,
332 mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
, 0);
334 WREG32_SOC15(MMHUB
, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
,
336 WREG32_SOC15(MMHUB
, 0, mmMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
,
/* Configure the user VM contexts (the loop walks contexts via i, upper
 * bound 14 inclusive): for each, enable the context, set page-table
 * depth from vm_manager.num_level, enable the protection-fault default
 * actions (range/dummy-page/PDE0/valid/read/write/execute), set the
 * page-table block size from vm_manager.block_size - 9, and program the
 * per-context page-table start (0) and end (max_pfn - 1) addresses,
 * each offset by ctx_distance / ctx_addr_distance per context.
 */
340 static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device
*adev
)
342 struct amdgpu_vmhub
*hub
= &adev
->vmhub
[AMDGPU_MMHUB_0
];
346 for (i
= 0; i
<= 14; i
++) {
347 tmp
= RREG32_SOC15_OFFSET(MMHUB
, 0, mmMMVM_CONTEXT1_CNTL
, i
);
348 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT1_CNTL
, ENABLE_CONTEXT
, 1);
349 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT1_CNTL
, PAGE_TABLE_DEPTH
,
350 adev
->vm_manager
.num_level
);
351 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT1_CNTL
,
352 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT
, 1);
353 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT1_CNTL
,
354 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT
,
356 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT1_CNTL
,
357 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT
, 1);
358 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT1_CNTL
,
359 VALID_PROTECTION_FAULT_ENABLE_DEFAULT
, 1);
360 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT1_CNTL
,
361 READ_PROTECTION_FAULT_ENABLE_DEFAULT
, 1);
362 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT1_CNTL
,
363 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT
, 1);
364 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT1_CNTL
,
365 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT
, 1);
366 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT1_CNTL
,
367 PAGE_TABLE_BLOCK_SIZE
,
368 adev
->vm_manager
.block_size
- 9);
369 /* Send no-retry XNACK on fault to suppress VM fault storm. */
370 tmp
= REG_SET_FIELD(tmp
, MMVM_CONTEXT1_CNTL
,
371 RETRY_PERMISSION_OR_INVALID_PAGE_FAULT
,
373 WREG32_SOC15_OFFSET(MMHUB
, 0, mmMMVM_CONTEXT1_CNTL
,
374 i
* hub
->ctx_distance
, tmp
);
375 WREG32_SOC15_OFFSET(MMHUB
, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
,
376 i
* hub
->ctx_addr_distance
, 0);
377 WREG32_SOC15_OFFSET(MMHUB
, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
,
378 i
* hub
->ctx_addr_distance
, 0);
379 WREG32_SOC15_OFFSET(MMHUB
, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
,
380 i
* hub
->ctx_addr_distance
,
381 lower_32_bits(adev
->vm_manager
.max_pfn
- 1));
382 WREG32_SOC15_OFFSET(MMHUB
, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
,
383 i
* hub
->ctx_addr_distance
,
384 upper_32_bits(adev
->vm_manager
.max_pfn
- 1));
/* Initialize all 18 TLB-invalidation engines: set each engine's
 * address-range registers to cover everything (LO32 = 0xffffffff,
 * HI32 = 0x1f), offset by eng_addr_distance per engine.
 */
388 static void mmhub_v2_0_program_invalidation(struct amdgpu_device
*adev
)
390 struct amdgpu_vmhub
*hub
= &adev
->vmhub
[AMDGPU_MMHUB_0
];
393 for (i
= 0; i
< 18; ++i
) {
394 WREG32_SOC15_OFFSET(MMHUB
, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32
,
395 i
* hub
->eng_addr_distance
, 0xffffffff);
396 WREG32_SOC15_OFFSET(MMHUB
, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32
,
397 i
* hub
->eng_addr_distance
, 0x1f);
/* Bring up the MMHUB GART: run the full register-init sequence —
 * GART aperture, system aperture, TLB, L2 cache, system domain
 * (context 0), identity aperture disable, user VMID contexts, and
 * invalidation engines — in that order.
 */
401 static int mmhub_v2_0_gart_enable(struct amdgpu_device
*adev
)
404 mmhub_v2_0_init_gart_aperture_regs(adev
);
405 mmhub_v2_0_init_system_aperture_regs(adev
);
406 mmhub_v2_0_init_tlb_regs(adev
);
407 mmhub_v2_0_init_cache_regs(adev
);
409 mmhub_v2_0_enable_system_domain(adev
);
410 mmhub_v2_0_disable_identity_aperture(adev
);
411 mmhub_v2_0_setup_vmid_config(adev
);
412 mmhub_v2_0_program_invalidation(adev
);
/* Tear down the MMHUB GART: disable all 16 VM contexts, turn off the
 * L1 TLB and advanced driver model, disable the L2 cache, and clear
 * MMVM_L2_CNTL3.
 */
417 static void mmhub_v2_0_gart_disable(struct amdgpu_device
*adev
)
419 struct amdgpu_vmhub
*hub
= &adev
->vmhub
[AMDGPU_MMHUB_0
];
423 /* Disable all tables */
424 for (i
= 0; i
< 16; i
++)
425 WREG32_SOC15_OFFSET(MMHUB
, 0, mmMMVM_CONTEXT0_CNTL
,
426 i
* hub
->ctx_distance
, 0);
428 /* Setup TLB control */
429 tmp
= RREG32_SOC15(MMHUB
, 0, mmMMMC_VM_MX_L1_TLB_CNTL
);
430 tmp
= REG_SET_FIELD(tmp
, MMMC_VM_MX_L1_TLB_CNTL
, ENABLE_L1_TLB
, 0);
431 tmp
= REG_SET_FIELD(tmp
, MMMC_VM_MX_L1_TLB_CNTL
,
432 ENABLE_ADVANCED_DRIVER_MODEL
, 0);
433 WREG32_SOC15(MMHUB
, 0, mmMMMC_VM_MX_L1_TLB_CNTL
, tmp
);
436 tmp
= RREG32_SOC15(MMHUB
, 0, mmMMVM_L2_CNTL
);
437 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_CNTL
, ENABLE_L2_CACHE
, 0);
438 WREG32_SOC15(MMHUB
, 0, mmMMVM_L2_CNTL
, tmp
);
439 WREG32_SOC15(MMHUB
, 0, mmMMVM_L2_CNTL3
, 0);
/*
443 * mmhub_v2_0_set_fault_enable_default - update GART/VM fault handling
445 * @adev: amdgpu_device pointer
446 * @value: true redirects VM faults to the default page
 *
 * Sets every *_PROTECTION_FAULT_ENABLE_DEFAULT field of
 * MMVM_L2_PROTECTION_FAULT_CNTL to @value, and unconditionally enables
 * CRASH_ON_NO_RETRY_FAULT / CRASH_ON_RETRY_FAULT (the surrounding
 * condition for those, if any, is on original lines not visible here).
 * Skipped under SR-IOV: these registers are PF-programmed only.
 */
448 static void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device
*adev
, bool value
)
452 /* These registers are not accessible to VF-SRIOV.
453 * The PF will program them instead.
455 if (amdgpu_sriov_vf(adev
))
458 tmp
= RREG32_SOC15(MMHUB
, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL
);
459 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL
,
460 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
461 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL
,
462 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
463 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL
,
464 PDE1_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
465 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL
,
466 PDE2_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
467 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL
,
468 TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT
,
470 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL
,
471 NACK_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
472 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL
,
473 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
474 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL
,
475 VALID_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
476 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL
,
477 READ_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
478 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL
,
479 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
480 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL
,
481 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT
, value
);
483 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL
,
484 CRASH_ON_NO_RETRY_FAULT
, 1);
485 tmp
= REG_SET_FIELD(tmp
, MMVM_L2_PROTECTION_FAULT_CNTL
,
486 CRASH_ON_RETRY_FAULT
, 1);
488 WREG32_SOC15(MMHUB
, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL
, tmp
);
/* VM-hub callback table for MMHUB v2.0: fault-status decode and
 * invalidate-request construction, wired up in mmhub_v2_0_init().
 */
491 static const struct amdgpu_vmhub_funcs mmhub_v2_0_vmhub_funcs
= {
492 .print_l2_protection_fault_status
= mmhub_v2_0_print_l2_protection_fault_status
,
493 .get_invalidate_req
= mmhub_v2_0_get_invalidate_req
,
/* Populate adev->vmhub[AMDGPU_MMHUB_0]: cache register offsets for the
 * context0 page-table base, invalidation engine 0 (sem/req/ack),
 * context0 control, and L2 protection-fault status/control; compute the
 * per-context and per-engine register strides from consecutive register
 * offsets; build the context1 fault-interrupt enable mask; and install
 * the vmhub callback table defined above.
 */
496 static void mmhub_v2_0_init(struct amdgpu_device
*adev
)
498 struct amdgpu_vmhub
*hub
= &adev
->vmhub
[AMDGPU_MMHUB_0
];
500 hub
->ctx0_ptb_addr_lo32
=
501 SOC15_REG_OFFSET(MMHUB
, 0,
502 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
);
503 hub
->ctx0_ptb_addr_hi32
=
504 SOC15_REG_OFFSET(MMHUB
, 0,
505 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
);
506 hub
->vm_inv_eng0_sem
=
507 SOC15_REG_OFFSET(MMHUB
, 0, mmMMVM_INVALIDATE_ENG0_SEM
);
508 hub
->vm_inv_eng0_req
=
509 SOC15_REG_OFFSET(MMHUB
, 0, mmMMVM_INVALIDATE_ENG0_REQ
);
510 hub
->vm_inv_eng0_ack
=
511 SOC15_REG_OFFSET(MMHUB
, 0, mmMMVM_INVALIDATE_ENG0_ACK
);
512 hub
->vm_context0_cntl
=
513 SOC15_REG_OFFSET(MMHUB
, 0, mmMMVM_CONTEXT0_CNTL
);
514 hub
->vm_l2_pro_fault_status
=
515 SOC15_REG_OFFSET(MMHUB
, 0, mmMMVM_L2_PROTECTION_FAULT_STATUS
);
516 hub
->vm_l2_pro_fault_cntl
=
517 SOC15_REG_OFFSET(MMHUB
, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL
);
519 hub
->ctx_distance
= mmMMVM_CONTEXT1_CNTL
- mmMMVM_CONTEXT0_CNTL
;
520 hub
->ctx_addr_distance
= mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
-
521 mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
;
522 hub
->eng_distance
= mmMMVM_INVALIDATE_ENG1_REQ
-
523 mmMMVM_INVALIDATE_ENG0_REQ
;
524 hub
->eng_addr_distance
= mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32
-
525 mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32
;
527 hub
->vm_cntx_cntl_vm_fault
= MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
|
528 MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
|
529 MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
|
530 MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
|
531 MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
|
532 MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
|
533 MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK
;
535 hub
->vmhub_funcs
= &mmhub_v2_0_vmhub_funcs
;
/* Enable/disable medium-grain clock gating (MGCG) for the MMHUB.
 * Reads MM_ATC_L2_MISC_CG and DAGB0_CNTL_MISC2 — using the Sienna
 * Cichlid register offsets on Sienna Cichlid / Navy Flounder, the
 * common offsets otherwise.  When enabling (and AMD_CG_SUPPORT_MC_MGCG
 * is set in cg_flags) the CG enable bit is set and all DAGB0
 * DISABLE_*_CG bits are cleared; otherwise the inverse.  The registers
 * are written back through the matching per-ASIC offsets (the
 * changed-value guards, if any, are on lines not visible here).
 */
538 static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device
*adev
,
541 uint32_t def
, data
, def1
, data1
;
543 switch (adev
->asic_type
) {
544 case CHIP_SIENNA_CICHLID
:
545 case CHIP_NAVY_FLOUNDER
:
546 def
= data
= RREG32_SOC15(MMHUB
, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid
);
547 def1
= data1
= RREG32_SOC15(MMHUB
, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid
);
550 def
= data
= RREG32_SOC15(MMHUB
, 0, mmMM_ATC_L2_MISC_CG
);
551 def1
= data1
= RREG32_SOC15(MMHUB
, 0, mmDAGB0_CNTL_MISC2
);
555 if (enable
&& (adev
->cg_flags
& AMD_CG_SUPPORT_MC_MGCG
)) {
556 data
|= MM_ATC_L2_MISC_CG__ENABLE_MASK
;
558 data1
&= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK
|
559 DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK
|
560 DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK
|
561 DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK
|
562 DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK
|
563 DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK
);
566 data
&= ~MM_ATC_L2_MISC_CG__ENABLE_MASK
;
568 data1
|= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK
|
569 DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK
|
570 DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK
|
571 DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK
|
572 DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK
|
573 DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK
);
576 switch (adev
->asic_type
) {
577 case CHIP_SIENNA_CICHLID
:
578 case CHIP_NAVY_FLOUNDER
:
580 WREG32_SOC15(MMHUB
, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid
, data
);
582 WREG32_SOC15(MMHUB
, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid
, data1
);
586 WREG32_SOC15(MMHUB
, 0, mmMM_ATC_L2_MISC_CG
, data
);
588 WREG32_SOC15(MMHUB
, 0, mmDAGB0_CNTL_MISC2
, data1
);
/* Enable/disable memory light sleep (MC_LS) for the MMHUB: toggles the
 * MEM_LS_ENABLE bit of MM_ATC_L2_MISC_CG, using the Sienna Cichlid
 * register offset on Sienna Cichlid / Navy Flounder and the common
 * offset otherwise.  The bit is set only when enabling and
 * AMD_CG_SUPPORT_MC_LS is present in cg_flags.
 */
593 static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device
*adev
,
598 switch (adev
->asic_type
) {
599 case CHIP_SIENNA_CICHLID
:
600 case CHIP_NAVY_FLOUNDER
:
601 def
= data
= RREG32_SOC15(MMHUB
, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid
);
604 def
= data
= RREG32_SOC15(MMHUB
, 0, mmMM_ATC_L2_MISC_CG
);
608 if (enable
&& (adev
->cg_flags
& AMD_CG_SUPPORT_MC_LS
))
609 data
|= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK
;
611 data
&= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK
;
614 switch (adev
->asic_type
) {
615 case CHIP_SIENNA_CICHLID
:
616 case CHIP_NAVY_FLOUNDER
:
617 WREG32_SOC15(MMHUB
, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid
, data
);
620 WREG32_SOC15(MMHUB
, 0, mmMM_ATC_L2_MISC_CG
, data
);
/* Clock-gating state entry point: no-op under SR-IOV; for supported
 * ASICs (Sienna Cichlid / Navy Flounder visible here; other case labels
 * are on lines not in this chunk) applies both MGCG and light-sleep
 * updates, gating when @state == AMD_CG_STATE_GATE.
 */
626 static int mmhub_v2_0_set_clockgating(struct amdgpu_device
*adev
,
627 enum amd_clockgating_state state
)
629 if (amdgpu_sriov_vf(adev
))
632 switch (adev
->asic_type
) {
636 case CHIP_SIENNA_CICHLID
:
637 case CHIP_NAVY_FLOUNDER
:
638 mmhub_v2_0_update_medium_grain_clock_gating(adev
,
639 state
== AMD_CG_STATE_GATE
);
640 mmhub_v2_0_update_medium_grain_light_sleep(adev
,
641 state
== AMD_CG_STATE_GATE
);
/* Report the current MMHUB clock-gating state into @flags: no-op under
 * SR-IOV; reads MM_ATC_L2_MISC_CG and DAGB0_CNTL_MISC2 through the
 * per-ASIC offsets, then sets AMD_CG_SUPPORT_MC_MGCG when CG is enabled
 * and no DAGB0 DISABLE_*_CG bit is set, and AMD_CG_SUPPORT_MC_LS when
 * MEM_LS_ENABLE is set.
 */
650 static void mmhub_v2_0_get_clockgating(struct amdgpu_device
*adev
, u32
*flags
)
654 if (amdgpu_sriov_vf(adev
))
657 switch (adev
->asic_type
) {
658 case CHIP_SIENNA_CICHLID
:
659 case CHIP_NAVY_FLOUNDER
:
660 data
= RREG32_SOC15(MMHUB
, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid
);
661 data1
= RREG32_SOC15(MMHUB
, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid
);
664 data
= RREG32_SOC15(MMHUB
, 0, mmMM_ATC_L2_MISC_CG
);
665 data1
= RREG32_SOC15(MMHUB
, 0, mmDAGB0_CNTL_MISC2
);
669 /* AMD_CG_SUPPORT_MC_MGCG */
670 if ((data
& MM_ATC_L2_MISC_CG__ENABLE_MASK
) &&
671 !(data1
& (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK
|
672 DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK
|
673 DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK
|
674 DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK
|
675 DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK
|
676 DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK
)))
677 *flags
|= AMD_CG_SUPPORT_MC_MGCG
;
679 /* AMD_CG_SUPPORT_MC_LS */
680 if (data
& MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK
)
681 *flags
|= AMD_CG_SUPPORT_MC_LS
;
684 const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs
= {
685 .ras_late_init
= amdgpu_mmhub_ras_late_init
,
686 .init
= mmhub_v2_0_init
,
687 .gart_enable
= mmhub_v2_0_gart_enable
,
688 .set_fault_enable_default
= mmhub_v2_0_set_fault_enable_default
,
689 .gart_disable
= mmhub_v2_0_gart_disable
,
690 .set_clockgating
= mmhub_v2_0_set_clockgating
,
691 .get_clockgating
= mmhub_v2_0_get_clockgating
,
692 .setup_vm_pt_regs
= mmhub_v2_0_setup_vm_pt_regs
,