/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"

#include "vega10/soc15ip.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/DC/dce_12_0_offset.h"
#include "vega10/DC/dce_12_0_sh_mask.h"
#include "vega10/vega10_enum.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
#include "vega10/ATHUB/athub_1_0_offset.h"

#include "soc15_common.h"
#include "vega10/UMC/umc_6_0_sh_mask.h"

#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"

#define mmDF_CS_AON0_DramBaseAddress0				0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX			0
//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT		0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT	0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT	0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT	0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT	0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK		0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK	0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK		0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK		0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK		0xFFFFF000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS	8

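/*
 * Golden register settings below are { offset, and_mask, or_value } triples
 * consumed by amdgpu_program_register_sequence() (assumed semantics: a masked
 * read-modify-write of each listed register).
 */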
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const u32 golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
	SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
};

static const u32 golden_settings_athub_1_0_0[] =
{
	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
};

/* Ecc related register addresses, (BASE + reg offset) */
/* Universal Memory Controller caps (may be fused). */
/* UMCCH:UmcLocalCap */
#define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
#define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
#define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
#define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
#define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
#define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
#define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
#define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
#define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
#define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
#define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
#define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
#define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
#define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
#define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
#define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)

/* Universal Memory Controller Channel config. */
/* UMCCH:UMC_CONFIG */
#define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)

/* Universal Memory Controller Channel Ecc config. */
/* UMCCH:EccCtrl */
#define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)

static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};

static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};

static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};

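/*
 * Toggle the protection-fault interrupt enable bits in the VM_CONTEXT*_CNTL
 * registers of every VM hub, for all 16 VM contexts.
 */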
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

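/*
 * VM fault handler: decode the faulting GPU address from the IV entry,
 * read and clear the hub's L2 protection fault status (bare metal only),
 * and emit rate-limited diagnostics.
 */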
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
			entry->vm_id_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vm_id,
			entry->pas_id);
		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

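/*
 * Build a VM_INVALIDATE_ENG*_REQ value that requests a legacy (flush type 0)
 * invalidation of the L1/L2 page table caches for the given VMID.
 */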
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
{
	u32 req = 0;

	/* invalidate using legacy mode on vm_id */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache */
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	spin_lock(&adev->mc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK. */
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay. */
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}

/**
 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
					void *cpu_pt_addr,
					uint32_t gpu_page_idx,
					uint64_t addr,
					uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C
	 * 1 system
	 * 0 valid
	 */

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

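/* Translate AMDGPU_VM_* mapping flags from the UAPI into hardware PTE bits. */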
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
{
	addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
	BUG_ON(addr & 0xFFFF00000000003FULL);
	return addr;
}

static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	return 0;
}

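/*
 * Probe the UMC channel registers and report whether ECC can be used:
 * returns 1 when every channel has ECC enabled and DRAM ready, 0 otherwise.
 * "lost_sheep" counts the channels that fail a check.
 */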
static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
{
	uint32_t reg_val;
	uint32_t reg_addr;
	uint32_t field_val;
	size_t i;
	uint32_t fv2;
	size_t lost_sheep;

	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");

	lost_sheep = 0;
	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
		reg_addr = ecc_umclocalcap_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
					  EccDis);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "EccDis: 0x%08x, ",
			  reg_val, field_val);
		if (field_val) {
			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
		reg_addr = ecc_umcch_umc_config_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
					  DramReady);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "DramReady: 0x%08x\n",
			  reg_val, field_val);

		if (!field_val) {
			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
		reg_addr = ecc_umcch_eccctrl_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
					  WrEccEn);
		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
				    RdEccEn);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "WrEccEn: 0x%08x, "
			  "RdEccEn: 0x%08x\n",
			  reg_val, field_val, fv2);

		if (!field_val) {
			DRM_DEBUG("ecc: WrEccEn is not set\n");
			++lost_sheep;
		}
		if (!fv2) {
			DRM_DEBUG("ecc: RdEccEn is not set\n");
			++lost_sheep;
		}
	}

	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
	return lost_sheep == 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/*
	 * The latest engine allocation on gfx9 is:
	 * Engine 0, 1: idle
	 * Engine 2, 3: firmware
	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
	 * Engine 14~15: idle
	 * Engine 16: kfd tlb invalidation
	 * Engine 17: Gart flushes
	 */
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
	unsigned i;
	int r;

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 16 is used for KFD and 17 for GART flushes */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 16);

	r = gmc_v9_0_ecc_available(adev);
	if (r == 1) {
		DRM_INFO("ECC is active.\n");
	} else if (r == 0) {
		DRM_INFO("ECC is not present.\n");
	} else {
		DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
		return r;
	}

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

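/* Place VRAM and the GART aperture within the GPU's physical address space. */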
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_mc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_vram_location(adev, &adev->mc, base);
	amdgpu_gart_location(adev, mc);
	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
	else
		adev->vm_manager.vram_base_offset = 0;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;
	int r;

	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	if (!adev->mc.vram_width) {
		/* hbm memory channel size */
		chansize = 128;

		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
		tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
		switch (tmp) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 0;
			break;
		case 3:
			numchan = 4;
			break;
		case 4:
			numchan = 0;
			break;
		case 5:
			numchan = 8;
			break;
		case 6:
			numchan = 0;
			break;
		case 7:
			numchan = 16;
			break;
		case 8:
			numchan = 2;
			break;
		}
		adev->mc.vram_width = numchan * chansize;
	}

	/* size in MB on si */
	adev->mc.mc_vram_size =
		((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
		 nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = adev->mc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->mc.visible_vram_size = adev->mc.aper_size;
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		default:
			adev->mc.gart_size = 256ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->mc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

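/* Allocate the GART table in VRAM and set the default PTE flags for its entries. */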
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

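/*
 * Software init: per-ASIC VM manager sizing, VM fault IRQ sources, DMA mask,
 * memory controller and GART setup.
 */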
static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->mc.invalidate_lock);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			adev->vm_manager.max_pfn = 1ULL << 36;
			adev->vm_manager.block_size = 9;
			adev->vm_manager.num_level = 3;
			amdgpu_vm_set_fragment_size(adev, 9);
		} else {
			/* vm_size is 64GB for legacy 2-level page support */
			amdgpu_vm_adjust_size(adev, 64, 9);
			adev->vm_manager.num_level = 1;
		}
		break;
	case CHIP_VEGA10:
		/* XXX Don't know how to get VRAM type yet. */
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		adev->vm_manager.max_pfn = 1ULL << 36;
		adev->vm_manager.block_size = 9;
		adev->vm_manager.num_level = 3;
		amdgpu_vm_set_fragment_size(adev, 9);
		break;
	default:
		break;
	}

	DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
		 adev->vm_manager.max_pfn >> 18, adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
				&adev->mc.vm_fault);
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
				&adev->mc.vm_fault);

	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/*
	 * It needs to reserve 8M stolen memory for vega10
	 * TODO: Figure out how to avoid that...
	 */
	adev->mc.stolen_size = 8 * 1024 * 1024;

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v9_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	gmc_v9_0_gart_fini(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

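/* Apply the per-ASIC golden register settings for the MMHUB/ATHUB blocks. */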
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		amdgpu_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_program_register_sequence(adev,
					golden_settings_vega10_hdp,
					ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_initialize_power_gating(adev);
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* After HDP is initialized, flush HDP. */
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

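/* hw_init: program golden registers, disable VGA access when displays are present, then enable the GART. */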
static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}

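/*
 * Remaining IP callbacks: the GMC v9 memory controller needs no idle wait or
 * soft reset here; clock gating state is delegated to the MMHUB.
 */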
static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};