/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"

#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "athub_v2_0.h"

/* XXX Move this macro to a navi10 header file, which is like vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS 8

#if 0
static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
        /* TODO add golden setting for hdp */
};
#endif

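/*
 * Enable or disable the VM protection-fault interrupt sources. The per-hub
 * enable bits live in the *VM_CONTEXT*_CNTL registers, one register per VM
 * context laid out consecutively, so both loops below walk the 16 registers
 * starting at vm_context0_cntl and set or clear the hub's fault-enable mask.
 */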
static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *src, unsigned type,
                                   enum amdgpu_interrupt_state state)
{
        struct amdgpu_vmhub *hub;
        u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;

        bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

        bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                /* MM HUB */
                hub = &adev->vmhub[AMDGPU_MMHUB_0];
                for (i = 0; i < 16; i++) {
                        reg = hub->vm_context0_cntl + i;
                        tmp = RREG32(reg);
                        tmp &= ~bits[AMDGPU_MMHUB_0];
                        WREG32(reg, tmp);
                }

                /* GFX HUB */
                hub = &adev->vmhub[AMDGPU_GFXHUB_0];
                for (i = 0; i < 16; i++) {
                        reg = hub->vm_context0_cntl + i;
                        tmp = RREG32(reg);
                        tmp &= ~bits[AMDGPU_GFXHUB_0];
                        WREG32(reg, tmp);
                }
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                /* MM HUB */
                hub = &adev->vmhub[AMDGPU_MMHUB_0];
                for (i = 0; i < 16; i++) {
                        reg = hub->vm_context0_cntl + i;
                        tmp = RREG32(reg);
                        tmp |= bits[AMDGPU_MMHUB_0];
                        WREG32(reg, tmp);
                }

                /* GFX HUB */
                hub = &adev->vmhub[AMDGPU_GFXHUB_0];
                for (i = 0; i < 16; i++) {
                        reg = hub->vm_context0_cntl + i;
                        tmp = RREG32(reg);
                        tmp |= bits[AMDGPU_GFXHUB_0];
                        WREG32(reg, tmp);
                }
                break;
        default:
                break;
        }

        return 0;
}

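/*
 * VM fault handler. The faulting page address arrives split across the IV
 * ring entry: src_data[0] carries bits 43:12 of the address and the low
 * nibble of src_data[1] carries bits 47:44, which is why the two fields are
 * shifted by 12 and 44 below before being OR'ed together.
 */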
static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       struct amdgpu_iv_entry *entry)
{
        struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
        uint32_t status = 0;
        u64 addr;

        addr = (u64)entry->src_data[0] << 12;
        addr |= ((u64)entry->src_data[1] & 0xf) << 44;

        if (!amdgpu_sriov_vf(adev)) {
                /*
                 * Issue a dummy read to wait for the status register to
                 * be updated to avoid reading an incorrect value due to
                 * the new fast GRBM interface.
                 */
                if (entry->vmid_src == AMDGPU_GFXHUB_0)
                        RREG32(hub->vm_l2_pro_fault_status);

                status = RREG32(hub->vm_l2_pro_fault_status);
                WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
        }

        if (printk_ratelimit()) {
                struct amdgpu_task_info task_info;

                memset(&task_info, 0, sizeof(struct amdgpu_task_info));
                amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

                dev_err(adev->dev,
                        "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
                        "for process %s pid %d thread %s pid %d)\n",
                        entry->vmid_src ? "mmhub" : "gfxhub",
                        entry->src_id, entry->ring_id, entry->vmid,
                        entry->pasid, task_info.process_name, task_info.tgid,
                        task_info.task_name, task_info.pid);
                dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
                        addr, entry->client_id);
                if (!amdgpu_sriov_vf(adev)) {
                        dev_err(adev->dev,
                                "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
                                status);
                        dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
                                REG_GET_FIELD(status,
                                GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
                        dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
                                REG_GET_FIELD(status,
                                GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
                        dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
                                REG_GET_FIELD(status,
                                GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
                        dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
                                REG_GET_FIELD(status,
                                GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
                        dev_err(adev->dev, "\t RW: 0x%lx\n",
                                REG_GET_FIELD(status,
                                GCVM_L2_PROTECTION_FAULT_STATUS, RW));
                }
        }

        return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
        .set = gmc_v10_0_vm_fault_interrupt_state,
        .process = gmc_v10_0_process_interrupt,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->gmc.vm_fault.num_types = 1;
        adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
}

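/*
 * Build the GCVM_INVALIDATE_ENG*_REQ payload for a legacy (per-VMID)
 * invalidation: select the VMID with a one-hot bit, request the given flush
 * type, and invalidate the L1 PTE caches plus all L2 PTE/PDE levels. For
 * example, vmid 3 with flush_type 0 yields PER_VMID_INVALIDATE_REQ =
 * 1 << 3 = 0x8 with all five INVALIDATE_L* fields set.
 */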
static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
                                             uint32_t flush_type)
{
        u32 req = 0;

        /* invalidate using legacy mode on vmid */
        req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
                            PER_VMID_INVALIDATE_REQ, 1 << vmid);
        req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
        req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
        req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
        req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
        req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
        req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
        req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
                            CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

        return req;
}

/*
 * GART
 * VMID 0 is the physical GPU address space as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

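/*
 * Synchronous, MMIO-based TLB flush for one hub: write the invalidate
 * request to engine 17 (reserved for GART flushes) and busy-wait, up to
 * adev->usec_timeout microseconds, for the per-VMID bit to appear in the
 * matching ACK register.
 */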
static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
                                   unsigned int vmhub, uint32_t flush_type)
{
        struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
        u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type);
        /* Use register 17 for GART */
        const unsigned eng = 17;
        unsigned int i;

        WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

        /*
         * Issue a dummy read to wait for the ACK register to be cleared
         * to avoid a false ACK due to the new fast GRBM interface.
         */
        if (vmhub == AMDGPU_GFXHUB_0)
                RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

        /* Wait for ACK with a delay. */
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
                tmp &= 1 << vmid;
                if (tmp)
                        break;

                udelay(1);
        }

        if (i < adev->usec_timeout)
                return;

        DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                    uint32_t vmhub, uint32_t flush_type)
{
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct dma_fence *fence;
        struct amdgpu_job *job;

        int r;

        /* flush hdp cache */
        adev->nbio.funcs->hdp_flush(adev, NULL);

        mutex_lock(&adev->mman.gtt_window_lock);

        if (vmhub == AMDGPU_MMHUB_0) {
                gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
                mutex_unlock(&adev->mman.gtt_window_lock);
                return;
        }

        BUG_ON(vmhub != AMDGPU_GFXHUB_0);

        if (!adev->mman.buffer_funcs_enabled ||
            !adev->ib_pool_ready ||
            adev->in_gpu_reset) {
                gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
                mutex_unlock(&adev->mman.gtt_window_lock);
                return;
        }

        /*
         * The SDMA on Navi has a bug which can theoretically result in memory
         * corruption if an invalidation happens at the same time as a VA
         * translation. Avoid this by doing the invalidation from the SDMA
         * itself.
         */
        r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job);
        if (r)
                goto error_alloc;

        job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
        job->vm_needs_flush = true;
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        r = amdgpu_job_submit(job, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
        if (r)
                goto error_submit;

        mutex_unlock(&adev->mman.gtt_window_lock);

        dma_fence_wait(fence, false);
        dma_fence_put(fence);

        return;

error_submit:
        amdgpu_job_free(job);

error_alloc:
        mutex_unlock(&adev->mman.gtt_window_lock);
        DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}

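/*
 * Ring-emitted variant of the TLB flush: instead of MMIO writes, queue the
 * page-table-base update and the invalidate request on the ring's own VM
 * invalidation engine (ring->vm_inv_eng), then emit a register wait on the
 * per-VMID ACK bit so later packets see the flushed translation.
 */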
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                             unsigned vmid, uint64_t pd_addr)
{
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
        uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
        unsigned eng = ring->vm_inv_eng;

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
                              lower_32_bits(pd_addr));

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
                              upper_32_bits(pd_addr));

        amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);

        /* wait for the invalidate to complete */
        amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
                                  1 << vmid, 1 << vmid);

        return pd_addr;
}

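/*
 * Record the VMID -> PASID mapping in the IH block's lookup table so that
 * interrupt entries can be attributed back to a process; the GFX and MM
 * hubs each have their own LUT (IH_VMID_0_LUT vs IH_VMID_0_LUT_MM).
 */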
static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
                                         unsigned pasid)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg;

        if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
        else
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

        amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on NAVI 10:
 * 63:59 reserved
 * 58:57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
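
/*
 * Worked example of the PTE layout above (illustrative only, not used by
 * the driver): a valid, snooped, readable and writable system page at
 * physical address 0x1234567000 would be encoded as
 *
 *   (0x1234567000ULL & 0x0000FFFFFFFFF000ULL) |    bits 47:12, page base
 *   (1ULL << 0) |                                  valid
 *   (1ULL << 1) |                                  system
 *   (1ULL << 2) |                                  snooped
 *   (1ULL << 5) | (1ULL << 6)                      read + write
 *
 * which evaluates to 0x0000001234567067.
 */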

static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
        switch (flags) {
        case AMDGPU_VM_MTYPE_DEFAULT:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_NC:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_WC:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
        case AMDGPU_VM_MTYPE_CC:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
        case AMDGPU_VM_MTYPE_UC:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
        default:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
        }
}

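/*
 * Fix up a PDE before it is written: relocate VRAM addresses by the MC base
 * offset, and when further translation is enabled, give PDB1 entries a
 * block fragment size of 9 (512 pages) and set the translate-further (TF)
 * bit on PDB0 entries that point at a page table rather than mapping a
 * huge page.
 */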
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
                                 uint64_t *addr, uint64_t *flags)
{
        if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
                *addr = adev->vm_manager.vram_base_offset + *addr -
                        adev->gmc.vram_start;
        BUG_ON(*addr & 0xFFFF00000000003FULL);

        if (!adev->gmc.translate_further)
                return;

        if (level == AMDGPU_VM_PDB1) {
                /* Set the block fragment size */
                if (!(*flags & AMDGPU_PDE_PTE))
                        *flags |= AMDGPU_PDE_BFS(0x9);

        } else if (level == AMDGPU_VM_PDB0) {
                if (*flags & AMDGPU_PDE_PTE)
                        *flags &= ~AMDGPU_PDE_PTE;
                else
                        *flags |= AMDGPU_PTE_TF;
        }
}

static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
                                 struct amdgpu_bo_va_mapping *mapping,
                                 uint64_t *flags)
{
        *flags &= ~AMDGPU_PTE_EXECUTABLE;
        *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

        *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
        *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

        if (mapping->flags & AMDGPU_PTE_PRT) {
                *flags |= AMDGPU_PTE_PRT;
                *flags |= AMDGPU_PTE_SNOOPED;
                *flags |= AMDGPU_PTE_LOG;
                *flags |= AMDGPU_PTE_SYSTEM;
                *flags &= ~AMDGPU_PTE_VALID;
        }
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
        .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
        .map_mtype = gmc_v10_0_map_mtype,
        .get_vm_pde = gmc_v10_0_get_vm_pde,
        .get_vm_pte = gmc_v10_0_get_vm_pte
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
        if (adev->gmc.gmc_funcs == NULL)
                adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static int gmc_v10_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v10_0_set_gmc_funcs(adev);
        gmc_v10_0_set_irq_funcs(adev);

        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
        adev->gmc.shared_aperture_end =
                adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
        adev->gmc.private_aperture_start = 0x1000000000000000ULL;
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;

        return 0;
}

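/*
 * Distribute the hardware VM invalidation engines: each ring gets its own
 * engine on its hub, assigned sequentially from engine 4 upward, while
 * engine 17 stays reserved for the MMIO GART flushes issued by
 * gmc_v10_0_flush_vm_hub().
 */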
static int gmc_v10_0_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
        unsigned i;

        for (i = 0; i < adev->num_rings; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                unsigned vmhub = ring->funcs->vmhub;

                ring->vm_inv_eng = vm_inv_eng[vmhub]++;
                dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
                         ring->idx, ring->name, ring->vm_inv_eng,
                         ring->funcs->vmhub);
        }

        /* Engine 17 is used for GART flushes */
        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
                BUG_ON(vm_inv_eng[i] > 17);

        return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
                                        struct amdgpu_gmc *mc)
{
        u64 base = 0;

        base = gfxhub_v2_0_get_fb_location(adev);

        amdgpu_gmc_vram_location(adev, &adev->gmc, base);
        amdgpu_gmc_gart_location(adev, mc);

        /* base offset of vram pages */
        adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
        int chansize, numchan;

        if (!amdgpu_emu_mode)
                adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
        else {
                /* hard code vram_width for emulation */
                chansize = 128;
                numchan = 1;
                adev->gmc.vram_width = numchan * chansize;
        }

        /* Could the aperture size report 0? */
        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

        /* the memsize register reports the VRAM size in MB */
        adev->gmc.mc_vram_size =
                adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
        adev->gmc.visible_vram_size = adev->gmc.aper_size;

        /* In case the PCI BAR is larger than the actual amount of vram */
        if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
                adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

        /* set the gart size */
        if (amdgpu_gart_size == -1) {
                switch (adev->asic_type) {
                case CHIP_NAVI10:
                case CHIP_NAVI14:
                case CHIP_NAVI12:
                default:
                        adev->gmc.gart_size = 512ULL << 20;
                        break;
                }
        } else
                adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

        gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

        return 0;
}

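/*
 * Set up the GART page table: one 8-byte PTE per GPU page, with entries
 * mapped uncached (MTYPE_UC) and executable, then allocate the table itself
 * in VRAM.
 */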
static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.bo) {
                WARN(1, "NAVI10 PCIE GART already initialized\n");
                return 0;
        }

        /* Initialize common gart structure */
        r = amdgpu_gart_init(adev);
        if (r)
                return r;

        adev->gart.table_size = adev->gart.num_gpu_pages * 8;
        adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
                                    AMDGPU_PTE_EXECUTABLE;

        return amdgpu_gart_table_vram_alloc(adev);
}

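/*
 * Compute how much VRAM the pre-OS (vBIOS) framebuffer occupies so it can
 * be reserved: a fixed 9 MB when VGA emulation is active, otherwise
 * viewport height * surface pitch * 4 from the display controller
 * registers (the factor of 4 assumes a 32-bit pixel format).
 */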
static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
        u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
        unsigned size;

        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
        } else {
                u32 viewport;
                u32 pitch;

                viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
                pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
                size = (REG_GET_FIELD(viewport,
                                      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
                        REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
                        4);
        }
        /* return 0 if the pre-OS buffer uses up most of vram */
        if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
                DRM_ERROR("Warning: pre-OS buffer uses most of vram, "
                          "be aware of gart table overwrite\n");
                return 0;
        }

        return size;
}

static int gmc_v10_0_sw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gfxhub_v2_0_init(adev);
        mmhub_v2_0_init(adev);

        spin_lock_init(&adev->gmc.invalidate_lock);

        adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                adev->num_vmhubs = 2;
                /*
                 * To fulfill 4-level page support,
                 * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12,
                 * block size 512 (9bit)
                 */
                amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                break;
        default:
                break;
        }

        /* This interrupt is the VMC page fault. */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
                              VMC_1_0__SRCID__VM_FAULT,
                              &adev->gmc.vm_fault);
        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
                              UTCL2_1_0__SRCID__FAULT,
                              &adev->gmc.vm_fault);
        if (r)
                return r;

        /*
         * Set the internal MC address mask. This is the max address of the
         * GPU's internal address space.
         */
        adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

        /*
         * Reserve 8M stolen memory for navi10 like vega10
         * TODO: will check if it's really needed on asic.
         */
        if (amdgpu_emu_mode == 1)
                adev->gmc.stolen_size = 0;
        else
                adev->gmc.stolen_size = 9 * 1024 * 1024;

        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
        if (r) {
                printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
                return r;
        }

        r = gmc_v10_0_mc_init(adev);
        if (r)
                return r;

        adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);

        /* Memory manager */
        r = amdgpu_bo_init(adev);
        if (r)
                return r;

        r = gmc_v10_0_gart_init(adev);
        if (r)
                return r;

        /*
         * number of VMs
         * VMID 0 is reserved for System
         * amdgpu graphics/compute will use VMIDs 1-7
         * amdkfd will use VMIDs 8-15
         */
        adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
        adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;

        amdgpu_vm_manager_init(adev);

        return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
        amdgpu_gart_table_vram_free(adev);
        amdgpu_gart_fini(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_vm_manager_fini(adev);
        gmc_v10_0_gart_fini(adev);
        amdgpu_gem_force_release(adev);
        amdgpu_bo_fini(adev);

        return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                break;
        default:
                break;
        }
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
        int r;
        bool value;
        u32 tmp;

        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }

        r = amdgpu_gart_table_vram_pin(adev);
        if (r)
                return r;

        r = gfxhub_v2_0_gart_enable(adev);
        if (r)
                return r;

        r = mmhub_v2_0_gart_enable(adev);
        if (r)
                return r;

        tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
        tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
        WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);

        tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
        WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

        /* Flush HDP after it is initialized */
        adev->nbio.funcs->hdp_flush(adev, NULL);

        value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
                false : true;

        gfxhub_v2_0_set_fault_enable_default(adev, value);
        mmhub_v2_0_set_fault_enable_default(adev, value);
        gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
        gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

        adev->gart.ready = true;

        return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* The sequence of these two function calls matters. */
        gmc_v10_0_init_golden_registers(adev);

        r = gmc_v10_0_gart_enable(adev);
        if (r)
                return r;

        return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
        gfxhub_v2_0_gart_disable(adev);
        mmhub_v2_0_gart_disable(adev);
        amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
                return 0;
        }

        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
        gmc_v10_0_gart_disable(adev);

        return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v10_0_hw_fini(adev);

        return 0;
}

static int gmc_v10_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = gmc_v10_0_hw_init(adev);
        if (r)
                return r;

        amdgpu_vmid_reset_all(adev);

        return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
        /* MC is always ready in GMC v10. */
        return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
        /* There is no need to wait for MC idle in GMC v10. */
        return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
        return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = mmhub_v2_0_set_clockgating(adev, state);
        if (r)
                return r;

        return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        mmhub_v2_0_get_clockgating(adev, flags);

        athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
        .name = "gmc_v10_0",
        .early_init = gmc_v10_0_early_init,
        .late_init = gmc_v10_0_late_init,
        .sw_init = gmc_v10_0_sw_init,
        .sw_fini = gmc_v10_0_sw_fini,
        .hw_init = gmc_v10_0_hw_init,
        .hw_fini = gmc_v10_0_hw_fini,
        .suspend = gmc_v10_0_suspend,
        .resume = gmc_v10_0_resume,
        .is_idle = gmc_v10_0_is_idle,
        .wait_for_idle = gmc_v10_0_wait_for_idle,
        .soft_reset = gmc_v10_0_soft_reset,
        .set_clockgating_state = gmc_v10_0_set_clockgating_state,
        .set_powergating_state = gmc_v10_0_set_powergating_state,
        .get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 10,
        .minor = 0,
        .rev = 0,
        .funcs = &gmc_v10_0_ip_funcs,
};