/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "gfxhub_v1_1.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION				0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX			2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L

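/*
 * These are consumed by gmc_v9_0_get_vbios_fb_size() below to read the
 * active scanout viewport on Raven (DCN).
 */
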
/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

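/*
 * Raw { register offset, and-mask, or-value } triples, applied by
 * amdgpu_device_program_register_sequence() from gmc_v9_0_gart_enable()
 * below (mask/value semantics per that helper).
 */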
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

/* Ecc related register addresses, (BASE + reg offset) */
/* Universal Memory Controller caps (may be fused). */
/* UMCCH:UmcLocalCap */
#define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
#define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
#define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
#define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
#define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
#define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
#define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
#define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
#define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
#define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
#define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
#define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
#define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
#define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
#define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
#define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)

/* Universal Memory Controller Channel config. */
/* UMCCH:UMC_CONFIG */
#define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)

/* Universal Memory Controller Channel Ecc config. */
/* UMCCH:EccCtrl */
#define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)

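/*
 * The sixteen instances of each UMC register above follow a regular
 * layout: four channels per UMC instance at a 0x800 stride, and four
 * UMC instances at a 0x40000 stride. Illustrative formula (derived
 * from the #defines, not an official macro):
 *   ADDR(umc, ch) = BASE + umc * 0x40000 + ch * 0x800
 */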
static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};

static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};

static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};

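/**
 * gmc_v9_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type (unused)
 * @state: AMDGPU_IRQ_STATE_DISABLE or AMDGPU_IRQ_STATE_ENABLE
 *
 * Sets or clears the protection fault interrupt enable bits in the
 * VM context control registers of all sixteen contexts on both VM hubs.
 */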
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
	default:
		break;
	}

	return 0;
}

/**
 * gmc_v9_0_prescreen_iv - prescreen an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @entry: interrupt vector entry
 * @addr: faulting address decoded from @entry
 *
 * Returns true if the interrupt vector should be further processed.
 */
static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
				  struct amdgpu_iv_entry *entry,
				  uint64_t addr)
{
	struct amdgpu_vm *vm;
	u64 key;
	int r;

	/* No PASID, can't identify faulting process */
	if (!entry->pasid)
		return true;

	/* Not a retry fault */
	if (!(entry->src_data[1] & 0x80))
		return true;

	/* Track retry faults in per-VM fault FIFO. */
	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
	if (!vm) {
		/* VM not found, process it normally */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return true;
	}

	key = AMDGPU_VM_FAULT(entry->pasid, addr);
	r = amdgpu_vm_add_fault(vm->fault_hash, key);

	/* Hash table is full or the fault is already being processed,
	 * ignore further page faults
	 */
	if (r != 0) {
		spin_unlock(&adev->vm_manager.pasid_lock);
		return false;
	}
	/* No locking required with single writer and single reader */
	r = kfifo_put(&vm->faults, key);
	if (!r) {
		/* FIFO is full. Ignore it until there is space */
		amdgpu_vm_clear_fault(vm->fault_hash, key);
		spin_unlock(&adev->vm_manager.pasid_lock);
		return false;
	}

	spin_unlock(&adev->vm_manager.pasid_lock);
	/* It's the first fault for this address, process it normally */
	return true;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
		return 1; /* This also prevents sending it to KFD */

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info = { 0 };

		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

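/**
 * gmc_v9_0_get_invalidate_req - build a VM invalidation request
 *
 * @vmid: vm instance to invalidate
 * @flush_type: the flush type
 *
 * Returns the VM_INVALIDATE_ENG0_REQ value that invalidates the L2
 * PTE/PDE and L1 PTE cache entries of @vmid when written to an
 * invalidation engine's REQ register.
 */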
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					    uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
				   uint32_t vmid, uint32_t flush_type)
{
	const unsigned eng = 17;
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

		/* This is necessary for a HW workaround under SRIOV as well
		 * as GFXOFF under bare metal
		 */
		if (adev->gfx.kiq.ring.sched.ready &&
		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
		    !adev->in_gpu_reset) {
			uint32_t req = hub->vm_inv_eng0_req + eng;
			uint32_t ack = hub->vm_inv_eng0_ack + eng;

			amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
							   1 << vmid);
			continue;
		}

		spin_lock(&adev->gmc.invalidate_lock);
		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}
		spin_unlock(&adev->gmc.invalidate_lock);
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}
}

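/**
 * gmc_v9_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * @ring: amdgpu ring pointer
 * @vmid: vm instance to flush
 * @pd_addr: new page directory address for @vmid
 *
 * Writes the page directory address to the hub's per-VMID page table
 * base registers and emits a write-and-wait on the ring's dedicated
 * invalidation engine. Returns @pd_addr.
 */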
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}

static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/**
 * gmc_v9_0_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C
	 * 1 system
	 * 0 valid
	 */
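	/*
	 * Worked example (illustrative, not taken from hardware docs):
	 * a 4K system page at physical address 0x1234567000, mapped
	 * read/write, encodes as
	 *   (0x1234567000 & 0x0000FFFFFFFFF000ULL) | AMDGPU_PTE_SYSTEM |
	 *   AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_VALID
	 * i.e. the page base in bits 47:12 plus the low access bits above.
	 */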

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.set_pte_pde = gmc_v9_0_set_pte_pde,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
{
	uint32_t reg_val;
	uint32_t reg_addr;
	uint32_t field_val;
	size_t i;
	uint32_t fv2;
	size_t lost_sheep;

	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");

	lost_sheep = 0;
	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
		reg_addr = ecc_umclocalcap_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
					  EccDis);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "EccDis: 0x%08x, ",
			  reg_val, field_val);
		if (field_val) {
			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
		reg_addr = ecc_umcch_umc_config_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
					  DramReady);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "DramReady: 0x%08x\n",
			  reg_val, field_val);

		if (!field_val) {
			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
		reg_addr = ecc_umcch_eccctrl_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
					  WrEccEn);
		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
				    RdEccEn);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "WrEccEn: 0x%08x, "
			  "RdEccEn: 0x%08x\n",
			  reg_val, field_val, fv2);

		if (!field_val) {
			DRM_DEBUG("ecc: WrEccEn is not set\n");
			++lost_sheep;
		}
		if (!fv2) {
			DRM_DEBUG("ecc: RdEccEn is not set\n");
			++lost_sheep;
		}
	}

	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
	return lost_sheep == 0;
}

static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to first 8M of VRAM on S3 resume,
	 * this overrides GART which by default gets placed in first 8M and
	 * causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		return true;
	case CHIP_RAVEN:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/*
	 * The latest engine allocation on gfx9 is:
	 * Engine 0, 1: idle
	 * Engine 2, 3: firmware
	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
	 * Engine 14~15: idle
	 * Engine 16: kfd tlb invalidation
	 * Engine 17: Gart flushes
	 */
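	/* Next free invalidation engine on each hub; ring allocation starts at 4. */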
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
	unsigned i;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	/* Engine 16 is used for KFD and 17 for GART flushes */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 16);

	if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
		r = gmc_v9_0_ecc_available(adev);
		if (r == 1) {
			DRM_INFO("ECC is active.\n");
		} else if (r == 0) {
			DRM_INFO("ECC is not present.\n");
			adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
		} else {
			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
			return r;
		}
	}

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;
	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* get_memsize() returns the VRAM size in MB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8 MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* After HDP is initialized, flush HDP.*/
	adev->nbio_funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_flush_gpu_tlb(adev, 0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};