/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"

#include "vega10/soc15ip.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"

#define mmDF_CS_AON0_DramBaseAddress0                          0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX                 0
/* DF_CS_AON0_DramBaseAddress0 */
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT         0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT     0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT       0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT       0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT       0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK           0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK       0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK         0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK         0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK         0xFFFFF000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS  8

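/*
 * Golden-settings entries are {offset, AND mask, OR mask} triplets as
 * consumed by amdgpu_program_register_sequence(): the AND-masked bits
 * are cleared before the OR mask is applied.
 */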
static const u32 golden_settings_vega10_hdp[] =
{
        0xf64, 0x0fffffff, 0x00000000,
        0xf65, 0x0fffffff, 0x00000000,
        0xf66, 0x0fffffff, 0x00000000,
        0xf67, 0x0fffffff, 0x00000000,
        0xf68, 0x0fffffff, 0x00000000,
        0xf6a, 0x0fffffff, 0x00000000,
        0xf6b, 0x0fffffff, 0x00000000,
        0xf6c, 0x0fffffff, 0x00000000,
        0xf6d, 0x0fffffff, 0x00000000,
        0xf6e, 0x0fffffff, 0x00000000,
};

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *src,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        struct amdgpu_vmhub *hub;
        u32 tmp, reg, bits, i;

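        /*
         * Toggle every protection-fault interrupt source at once; the
         * VM_CONTEXT1_CNTL bit layout applies equally to the 16 context
         * control registers walked below for each hub.
         */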
        bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                /* MM HUB */
                hub = &adev->vmhub[AMDGPU_MMHUB];
                for (i = 0; i < 16; i++) {
                        reg = hub->vm_context0_cntl + i;
                        tmp = RREG32(reg);
                        tmp &= ~bits;
                        WREG32(reg, tmp);
                }

                /* GFX HUB */
                hub = &adev->vmhub[AMDGPU_GFXHUB];
                for (i = 0; i < 16; i++) {
                        reg = hub->vm_context0_cntl + i;
                        tmp = RREG32(reg);
                        tmp &= ~bits;
                        WREG32(reg, tmp);
                }
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                /* MM HUB */
                hub = &adev->vmhub[AMDGPU_MMHUB];
                for (i = 0; i < 16; i++) {
                        reg = hub->vm_context0_cntl + i;
                        tmp = RREG32(reg);
                        tmp |= bits;
                        WREG32(reg, tmp);
                }

                /* GFX HUB */
                hub = &adev->vmhub[AMDGPU_GFXHUB];
                for (i = 0; i < 16; i++) {
                        reg = hub->vm_context0_cntl + i;
                        tmp = RREG32(reg);
                        tmp |= bits;
                        WREG32(reg, tmp);
                }
                break;
        default:
                break;
        }

        return 0;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
                                struct amdgpu_irq_src *source,
                                struct amdgpu_iv_entry *entry)
{
        struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
        uint32_t status = 0;
        u64 addr;

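        /*
         * Reassemble the 48-bit fault address: src_data[0] carries page
         * address bits 43:12, the low nibble of src_data[1] bits 47:44.
         */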
        addr = (u64)entry->src_data[0] << 12;
        addr |= ((u64)entry->src_data[1] & 0xf) << 44;

        if (!amdgpu_sriov_vf(adev)) {
                status = RREG32(hub->vm_l2_pro_fault_status);
                WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
        }

        if (printk_ratelimit()) {
                dev_err(adev->dev,
                        "[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
                        entry->vm_id_src ? "mmhub" : "gfxhub",
                        entry->src_id, entry->ring_id, entry->vm_id,
                        entry->pas_id);
                dev_err(adev->dev, " at page 0x%016llx from %d\n",
                        addr, entry->client_id);
                if (!amdgpu_sriov_vf(adev))
                        dev_err(adev->dev,
                                "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
                                status);
        }

        return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
        .set = gmc_v9_0_vm_fault_interrupt_state,
        .process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->mc.vm_fault.num_types = 1;
        adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

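/*
 * Compose the value written to a VM_INVALIDATE_ENGn_REQ register: a legacy
 * (FLUSH_TYPE 0) invalidation of the given vm_id that drops the L1 PTE and
 * L2 PTE/PDE cache entries while leaving the recorded protection fault
 * status untouched.
 */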
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
{
        u32 req = 0;

        /* invalidate using legacy mode on vm_id */
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
                            PER_VMID_INVALIDATE_REQ, 1 << vm_id);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
                            CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

        return req;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
                                        uint32_t vmid)
{
        /* Use register 17 for GART */
        const unsigned eng = 17;
        unsigned i, j;

        /* flush hdp cache */
        if (adev->flags & AMD_IS_APU)
                nbio_v7_0_hdp_flush(adev);
        else
                nbio_v6_1_hdp_flush(adev);

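        /*
         * Serialize the request/ack sequence on the GART invalidation
         * engine so only one flush is in flight at a time.
         */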
        spin_lock(&adev->mc.invalidate_lock);

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmhub *hub = &adev->vmhub[i];
                u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

                WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

                /* Busy wait for ACK.*/
                for (j = 0; j < 100; j++) {
                        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
                        tmp &= 1 << vmid;
                        if (tmp)
                                break;
                        cpu_relax();
                }
                if (j < 100)
                        continue;

                /* Wait for ACK with a delay.*/
                for (j = 0; j < adev->usec_timeout; j++) {
                        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
                        tmp &= 1 << vmid;
                        if (tmp)
                                break;
                        udelay(1);
                }
                if (j < adev->usec_timeout)
                        continue;

                DRM_ERROR("Timeout waiting for VM flush ACK!\n");
        }

        spin_unlock(&adev->mc.invalidate_lock);
}

/**
 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
                                        void *cpu_pt_addr,
                                        uint32_t gpu_page_idx,
                                        uint64_t addr,
                                        uint64_t flags)
{
        void __iomem *ptr = (void *)cpu_pt_addr;
        uint64_t value;

        /*
         * PTE format on VEGA 10:
         * 63:59 reserved
         * 58:57 mtype
         * 56 F
         * 55 L
         * 54 P
         * 53 SW
         * 52 T
         * 50:48 reserved
         * 47:12 4k physical page base address
         * 11:7 fragment
         * 6 write
         * 5 read
         * 4 exe
         * 3 Z
         * 2 snooped
         * 1 system
         * 0 valid
         *
         * PDE format on VEGA 10:
         * 63:59 block fragment size
         * 58:55 reserved
         * 54 P
         * 53:48 reserved
         * 47:6 physical base address of PD or PTE
         * 5:3 reserved
         * 2 C
         * 1 system
         * 0 valid
         */

        /*
         * The following is for PTE only. GART does not have PDEs.
         */
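        /* Keep bits 47:12, the 4K-aligned page address, then OR in the flags */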
        value = addr & 0x0000FFFFFFFFF000ULL;
        value |= flags;
        writeq(value, ptr + (gpu_page_idx * 8));
        return 0;
}

static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
                                          uint32_t flags)
{
        uint64_t pte_flag = 0;

        if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
                pte_flag |= AMDGPU_PTE_EXECUTABLE;
        if (flags & AMDGPU_VM_PAGE_READABLE)
                pte_flag |= AMDGPU_PTE_READABLE;
        if (flags & AMDGPU_VM_PAGE_WRITEABLE)
                pte_flag |= AMDGPU_PTE_WRITEABLE;

        switch (flags & AMDGPU_VM_MTYPE_MASK) {
        case AMDGPU_VM_MTYPE_DEFAULT:
                pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
                break;
        case AMDGPU_VM_MTYPE_NC:
                pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
                break;
        case AMDGPU_VM_MTYPE_WC:
                pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
                break;
        case AMDGPU_VM_MTYPE_CC:
                pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
                break;
        case AMDGPU_VM_MTYPE_UC:
                pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
                break;
        default:
                pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
                break;
        }

        if (flags & AMDGPU_VM_PAGE_PRT)
                pte_flag |= AMDGPU_PTE_PRT;

        return pte_flag;
}

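/*
 * Convert a VRAM address as seen by the CPU domain into the GPU physical
 * address stored in a PDE. The BUG_ON rejects results outside the 48-bit
 * address space or not aligned to 64 bytes.
 */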
static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
{
        addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
        BUG_ON(addr & 0xFFFF00000000003FULL);
        return addr;
}

static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
        .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
        .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
        .get_invalidate_req = gmc_v9_0_get_invalidate_req,
        .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
        .get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
        if (adev->gart.gart_funcs == NULL)
                adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v9_0_set_gart_funcs(adev);
        gmc_v9_0_set_irq_funcs(adev);

        return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
        unsigned i;

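        /*
         * Give each ring a private invalidation engine on its hub,
         * starting at engine 3; engine 17 stays reserved for GART
         * flushes, which the BUG_ON below enforces.
         */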
        for (i = 0; i < adev->num_rings; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                unsigned vmhub = ring->funcs->vmhub;

                ring->vm_inv_eng = vm_inv_eng[vmhub]++;
                dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
                         ring->idx, ring->name, ring->vm_inv_eng,
                         ring->funcs->vmhub);
        }

        /* Engine 17 is used for GART flushes */
        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
                BUG_ON(vm_inv_eng[i] > 17);

        return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
                                        struct amdgpu_mc *mc)
{
        u64 base = 0;

        if (!amdgpu_sriov_vf(adev))
                base = mmhub_v1_0_get_fb_location(adev);
        amdgpu_vram_location(adev, &adev->mc, base);
        adev->mc.gtt_base_align = 0;
        amdgpu_gtt_location(adev, mc);
        /* base offset of vram pages */
        if (adev->flags & AMD_IS_APU)
                adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
        else
                adev->vm_manager.vram_base_offset = 0;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
        u32 tmp;
        int chansize, numchan;

        /* hbm memory channel size */
        chansize = 128;

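        /*
         * IntLvNumChan encodes the HBM channel-interleave count; the
         * switch below decodes it (the encodings mapped to 0 appear to
         * be reserved).
         */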
        tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
        tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
        tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
        switch (tmp) {
        case 0:
        default:
                numchan = 1;
                break;
        case 1:
                numchan = 2;
                break;
        case 2:
                numchan = 0;
                break;
        case 3:
                numchan = 4;
                break;
        case 4:
                numchan = 0;
                break;
        case 5:
                numchan = 8;
                break;
        case 6:
                numchan = 0;
                break;
        case 7:
                numchan = 16;
                break;
        case 8:
                numchan = 2;
                break;
        }
        adev->mc.vram_width = numchan * chansize;

        /* Could aper size report 0 ? */
        adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
        /* memsize is reported in MB */
        adev->mc.mc_vram_size =
                ((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
                 nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
        adev->mc.real_vram_size = adev->mc.mc_vram_size;
        adev->mc.visible_vram_size = adev->mc.aper_size;

        /* In case the PCI BAR is larger than the actual amount of vram */
        if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
                adev->mc.visible_vram_size = adev->mc.real_vram_size;

        /* Unless the user has overridden it, set the gart size to the
         * default GTT size or the vram size, whichever is larger.
         */
        if (amdgpu_gart_size == -1)
                adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
                                        adev->mc.mc_vram_size);
        else
                adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

        gmc_v9_0_vram_gtt_location(adev, &adev->mc);

        return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.robj) {
                WARN(1, "VEGA10 PCIE GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
        r = amdgpu_gart_init(adev);
        if (r)
                return r;
        adev->gart.table_size = adev->gart.num_gpu_pages * 8;
        adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
                                 AMDGPU_PTE_EXECUTABLE;
        return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v9_0_sw_init(void *handle)
{
        int r;
        int dma_bits;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gfxhub_v1_0_init(adev);
        mmhub_v1_0_init(adev);

        spin_lock_init(&adev->mc.invalidate_lock);

        if (adev->flags & AMD_IS_APU) {
                adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
                amdgpu_vm_adjust_size(adev, 64);
        } else {
                /* XXX Don't know how to get VRAM type yet. */
                adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
                /*
                 * To fulfill 4-level page support,
                 * vm size is 256TB (48bit), maximum size of Vega10,
                 * block size 512 (9bit)
                 */
                adev->vm_manager.vm_size = 1U << 18;
                adev->vm_manager.block_size = 9;
                DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
                         adev->vm_manager.vm_size,
                         adev->vm_manager.block_size);
        }

        /* This interrupt is VMC page fault.*/
        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
                                &adev->mc.vm_fault);
        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
                                &adev->mc.vm_fault);
        if (r)
                return r;

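        /* vm_size is in GB; a GB holds 1 << 18 4K pages, hence the shift */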
        adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

        /* Set the internal MC address mask
         * This is the max address of the GPU's
         * internal address space.
         */
        adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

        /*
         * Reserve 8M of stolen memory for vega10.
         * TODO: Figure out how to avoid that...
         */
        adev->mc.stolen_size = 8 * 1024 * 1024;

        /* set DMA mask + need_dma32 flags.
         * PCIE - can handle 44-bits.
         * IGP - can handle 44-bits
         * PCI - dma32 for legacy pci gart, 44 bits on vega10
         */
        adev->need_dma32 = false;
        dma_bits = adev->need_dma32 ? 32 : 44;
        r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
        if (r) {
                adev->need_dma32 = true;
                dma_bits = 32;
                printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
        }
        r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
        if (r) {
                pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
                printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
        }

        r = gmc_v9_0_mc_init(adev);
        if (r)
                return r;

        /* Memory manager */
        r = amdgpu_bo_init(adev);
        if (r)
                return r;

        r = gmc_v9_0_gart_init(adev);
        if (r)
                return r;

        /*
         * number of VMs
         * VMID 0 is reserved for System
         * amdgpu graphics/compute will use VMIDs 1-7
         * amdkfd will use VMIDs 8-15
         */
        adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
        adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

        /* TODO: fix num_level for APU when updating vm size and block size */
        if (adev->flags & AMD_IS_APU)
                adev->vm_manager.num_level = 1;
        else
                adev->vm_manager.num_level = 3;
        amdgpu_vm_manager_init(adev);

        return 0;
}

/**
 * gmc_v9_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (VEGA10).
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
        amdgpu_gart_table_vram_free(adev);
        amdgpu_gart_fini(adev);
}

static int gmc_v9_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_vm_manager_fini(adev);
        gmc_v9_0_gart_fini(adev);
        amdgpu_gem_force_release(adev);
        amdgpu_bo_fini(adev);

        return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
        /* Nothing to program here yet; the HDP golden settings are
         * applied from gmc_v9_0_gart_enable().
         */
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                break;
        case CHIP_RAVEN:
                break;
        default:
                break;
        }
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
        int r;
        bool value;
        u32 tmp;

        amdgpu_program_register_sequence(adev,
                                         golden_settings_vega10_hdp,
                                         (const u32)ARRAY_SIZE(golden_settings_vega10_hdp));

        if (adev->gart.robj == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = amdgpu_gart_table_vram_pin(adev);
        if (r)
                return r;

        /* After HDP is initialized, flush HDP.*/
        if (adev->flags & AMD_IS_APU)
                nbio_v7_0_hdp_flush(adev);
        else
                nbio_v6_1_hdp_flush(adev);

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                mmhub_v1_0_initialize_power_gating(adev);
                mmhub_v1_0_update_power_gating(adev, true);
                break;
        default:
                break;
        }

        r = gfxhub_v1_0_gart_enable(adev);
        if (r)
                return r;

        r = mmhub_v1_0_gart_enable(adev);
        if (r)
                return r;

        tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
        tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
        WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);

        tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
        WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
                value = false;
        else
                value = true;

        gfxhub_v1_0_set_fault_enable_default(adev, value);
        mmhub_v1_0_set_fault_enable_default(adev, value);

        gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->mc.gtt_size >> 20),
                 (unsigned long long)adev->gart.table_addr);
        adev->gart.ready = true;
        return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* The sequence of these two function calls matters.*/
        gmc_v9_0_init_golden_registers(adev);

        r = gmc_v9_0_gart_enable(adev);

        return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
        gfxhub_v1_0_gart_disable(adev);
        mmhub_v1_0_gart_disable(adev);
        amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
                return 0;
        }

        amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
        gmc_v9_0_gart_disable(adev);

        return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v9_0_hw_fini(adev);

        return 0;
}

static int gmc_v9_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = gmc_v9_0_hw_init(adev);
        if (r)
                return r;

        amdgpu_vm_reset_all_ids(adev);

        return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
        /* MC is always ready in GMC v9.*/
        return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
        /* There is no need to wait for MC idle in GMC v9.*/
        return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
        /* XXX for emulation.*/
        return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
                                        enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
                                        enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
        .name = "gmc_v9_0",
        .early_init = gmc_v9_0_early_init,
        .late_init = gmc_v9_0_late_init,
        .sw_init = gmc_v9_0_sw_init,
        .sw_fini = gmc_v9_0_sw_fini,
        .hw_init = gmc_v9_0_hw_init,
        .hw_fini = gmc_v9_0_hw_fini,
        .suspend = gmc_v9_0_suspend,
        .resume = gmc_v9_0_resume,
        .is_idle = gmc_v9_0_is_idle,
        .wait_for_idle = gmc_v9_0_wait_for_idle,
        .soft_reset = gmc_v9_0_soft_reset,
        .set_clockgating_state = gmc_v9_0_set_clockgating_state,
        .set_powergating_state = gmc_v9_0_set_powergating_state,
        .get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 9,
        .minor = 0,
        .rev = 0,
        .funcs = &gmc_v9_0_ip_funcs,
};